/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Unit tests for conversion operators. 
*/ #include "../common/cutlass_unit_test.h" #include "cutlass/numeric_conversion.h" #include "cutlass/layout/matrix.h" #include "cutlass/util/host_tensor.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace test { namespace core { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Simple conversion function template <typename Destination, typename Source, int Count> __global__ void convert( cutlass::Array<Destination, Count> *destination, cutlass::Array<Source, Count> const *source) { cutlass::NumericArrayConverter<Destination, Source, Count> convert; *destination = convert(*source); } ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Destination, typename Source, int Count> void run_test(const char dest_name[], const char source_name[], const int range = 4, const int offset = 0) { const int kN = Count; dim3 grid(1, 1); dim3 block(1, 1); cutlass::HostTensor<Destination, cutlass::layout::RowMajor> destination({1, kN}); cutlass::HostTensor<Source, cutlass::layout::RowMajor> source({1, kN}); auto source_ref = source.host_ref(); auto destination_ref = destination.host_ref(); for (int i = 0; i < kN; ++i) { source_ref.at({0, i}) = Source(i % range + offset); } source.sync_device(); convert<Destination, Source, kN><<< grid, block >>>( reinterpret_cast<cutlass::Array<Destination, kN> *>(destination.device_data()), reinterpret_cast<cutlass::Array<Source, kN> const *>(source.device_data()) ); destination.sync_host(); for (int i = 0; i < kN; ++i) { EXPECT_TRUE(float(destination_ref.at({0, i})) == float(source_ref.at({0, i}))) << "Destination type: " << dest_name << " "<< float(destination_ref.at({0, i})) << ", Source type: " << source_name << " " << float(source_ref.at({0, i})) << ", Count: " << Count; } } ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Destination, typename Source, typename ScaleFactor, int Count> __global__ void convert_with_scale_factor( cutlass::Array<Destination, Count> *destination, cutlass::Array<Source, Count> const *source, cutlass::Array<ScaleFactor, Count> const *scale_factor) { cutlass::NumericArrayConverter<Destination, Source, Count> convert; *destination = convert(*source, *scale_factor); } ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Destination, typename Source, typename ScaleFactor, int Count> void run_test_with_scalefactor(const char dest_name[], const char source_name[], const char scale_factor_name[], const int range = 4, const int offset = 0) { const int kN = Count; dim3 grid(1, 1); dim3 block(1, 1); cutlass::HostTensor<Destination, cutlass::layout::RowMajor> destination({1, kN}); cutlass::HostTensor<Source, cutlass::layout::RowMajor> source({1, kN}); cutlass::HostTensor<ScaleFactor, cutlass::layout::RowMajor> scale_factor({1, kN}); auto source_ref = source.host_ref(); auto destination_ref = destination.host_ref(); auto scale_factor_ref = scale_factor.host_ref(); for (int i = 0; i < kN; ++i) { source_ref.at({0, i}) = Source(i % range + offset); } for (int i = 0; i < kN; ++i) { scale_factor_ref.at({0, i}) = ScaleFactor(1 + i % 8); } source.sync_device(); scale_factor.sync_device(); convert_with_scale_factor<Destination, Source, ScaleFactor, kN><<< grid, block >>>( reinterpret_cast<cutlass::Array<Destination, kN> 
*>(destination.device_data()),
    reinterpret_cast<cutlass::Array<Source, kN> const *>(source.device_data()),
    reinterpret_cast<cutlass::Array<ScaleFactor, kN> const *>(scale_factor.device_data())
  );

  destination.sync_host();

  for (int i = 0; i < kN; ++i) {
    float ref = float(source_ref.at({0, i})) / float(scale_factor_ref.at({0, i}));
    bool pass = float(destination_ref.at({0, i})) == ref;
    EXPECT_TRUE(pass)
      << "Destination type: " << dest_name << " " << float(destination_ref.at({0, i})) << std::endl
      << ", Source type: " << source_name << " " << float(source_ref.at({0, i})) << std::endl
      << ", Scalefactor type: " << scale_factor_name << " " << float(scale_factor_ref.at({0, i})) << std::endl
      << ", idx: " << i << std::endl;
  }
}

} // namespace kernel
} // namespace core
} // namespace test

/////////////////////////////////////////////////////////////////////////////////////////////////

TEST(NumericConversion, f32_to_f16_rn) {
  constexpr int kN = 1;
  using Source = float;
  const char source_name[] = "float";
  using Destination = cutlass::half_t;
  const char dest_name[] = "half_t";
  test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name);
}

TEST(NumericConversion, f32x2_to_f16x2_rn) {
  constexpr int kN = 2;
  using Source = float;
  const char source_name[] = "float";
  using Destination = cutlass::half_t;
  const char dest_name[] = "half_t";
  test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name);
}

TEST(NumericConversion, f32x8_to_f16x8_rn) {
  constexpr int kN = 8;
  using Source = float;
  const char source_name[] = "float";
  using Destination = cutlass::half_t;
  const char dest_name[] = "half_t";
  test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name);
}

/////////////////////////////////////////////////////////////////////////////////////////////////

TEST(NumericConversion, f16_to_f32_rn) {
  int const kN = 1;
  using Source = cutlass::half_t;
  const char source_name[] = "half_t";
  using Destination = float;
  const char dest_name[] = "float";
  test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name);
}

TEST(NumericConversion, f16x8_to_f32x8_rn) {
  int const kN = 8;
  using Source = cutlass::half_t;
  const char source_name[] = "half_t";
  using Destination = float;
  const char dest_name[] = "float";
  test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name);
}

/////////////////////////////////////////////////////////////////////////////////////////////////

TEST(NumericConversion, f32_to_fe4m3_rn) {
  int const kN = 1;
  using Source = float;
  const char source_name[] = "float";
  using Destination = cutlass::float_e4m3_t;
  const char dest_name[] = "float_e4m3_t";
  test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name);
}

TEST(NumericConversion, f32_to_fe4m3_rn_array) {
  int const kN = 27;
  using Source = float;
  const char source_name[] = "float";
  using Destination = cutlass::float_e4m3_t;
  const char dest_name[] = "float_e4m3_t";
  test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name);
}

TEST(NumericConversion, f32_to_fe5m2_rn) {
  int const kN = 1;
  using Source = float;
  const char source_name[] = "float";
  using Destination = cutlass::float_e5m2_t;
  const char dest_name[] = "float_e5m2_t";
  test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name);
}

TEST(NumericConversion, f32_to_fe5m2_rn_array) {
  int const kN = 27;
  using Source = float;
  const char source_name[] = "float";
  using Destination = cutlass::float_e5m2_t;
  const char dest_name[] = "float_e5m2_t";
test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, f16_to_fe4m3_rn) { int const kN = 1; using Source = cutlass::half_t; const char source_name[] = "half_t"; using Destination = cutlass::float_e4m3_t; const char dest_name[] = "float_e4m3_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, f16_to_fe4m3_rn_array) { int const kN = 27; using Source = cutlass::half_t; const char source_name[] = "half_t"; using Destination = cutlass::float_e4m3_t; const char dest_name[] = "float_e4m3_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, f16_to_fe5m2_rn) { int const kN = 1; using Source = cutlass::half_t; const char source_name[] = "half_t"; using Destination = cutlass::float_e5m2_t; const char dest_name[] = "float_e5m2_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, f16_to_fe5m2_rn_array) { int const kN = 27; using Source = cutlass::half_t; const char source_name[] = "half_t"; using Destination = cutlass::float_e5m2_t; const char dest_name[] = "float_e5m2_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, bf16_to_fe4m3_rn) { int const kN = 1; using Source = cutlass::bfloat16_t; const char source_name[] = "bfloat16_t"; using Destination = cutlass::float_e4m3_t; const char dest_name[] = "float_e4m3_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, bf16_to_fe4m3_rn_array) { int const kN = 27; using Source = cutlass::bfloat16_t; const char source_name[] = "bfloat16_t"; using Destination = cutlass::float_e4m3_t; const char dest_name[] = "float_e4m3_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, bf16_to_fe5m2_rn) { int const kN = 1; using Source = cutlass::bfloat16_t; const char source_name[] = "bfloat16_t"; using Destination = cutlass::float_e5m2_t; const char dest_name[] = "float_e5m2_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, bf16_to_fe5m2_rn_array) { int const kN = 27; using Source = cutlass::bfloat16_t; const char source_name[] = "bfloat16_t"; using Destination = cutlass::float_e5m2_t; const char dest_name[] = "float_e5m2_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(NumericConversion, fe4m3_to_fe5m2_rn) { int const kN = 1; using Source = cutlass::float_e4m3_t; const char source_name[] = "float_e4m3_t"; using Destination = cutlass::float_e5m2_t; const char dest_name[] = "float_e5m2_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe4m3_to_fe5m2_array) { int const kN = 27; using Source = cutlass::float_e4m3_t; const char source_name[] = "float_e4m3_t"; using Destination = cutlass::float_e5m2_t; const char dest_name[] = "float_e5m2_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe5m2_to_fe4m3_rn) { int const kN = 1; using Source = cutlass::float_e5m2_t; const char source_name[] = "float_e5m2_t"; using Destination = cutlass::float_e4m3_t; const char dest_name[] = "float_e4m3_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } 
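/////////////////////////////////////////////////////////////////////////////////////////////////

// Note on the run_test() harness defined above: by default it fills the source tensor with
// i % range + offset, where range = 4 and offset = 0, so the tests in this group only exercise
// the small values {0, 1, 2, 3}. Those values are exactly representable in every destination
// type used here, which is why the harness can demand exact equality after converting both
// sides to float. Wider ranges are passed explicitly, as the typed VectorArrayConverterTest
// suite further below does.
//
// A minimal sketch (not part of the original file; the test name is hypothetical) of how a
// full-range signed conversion could be exercised by passing range and offset explicitly:
TEST(NumericConversion, s8_to_f32_full_range_sketch) {
  int const kN = 16;
  using Source = int8_t;
  const char source_name[] = "int8_t";
  using Destination = float;
  const char dest_name[] = "float";
  // i % 256 + (-128) sweeps int8_t values starting from the lowest representable value.
  test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name, 256, -128);
}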
TEST(NumericConversion, fe5m2_to_fe4m3_array) { int const kN = 27; using Source = cutlass::float_e5m2_t; const char source_name[] = "float_e5m2_t"; using Destination = cutlass::float_e4m3_t; const char dest_name[] = "float_e4m3_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe4m3_to_f32_rn) { int const kN = 1; using Source = cutlass::float_e4m3_t; const char source_name[] = "float_e4m3_t"; using Destination = float; const char dest_name[] = "float"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(NumericConversion, f32x8_to_s8x8_rn) { int const kN = 8; using Source = float; const char source_name[] = "float"; using Destination = int8_t; const char dest_name[] = "int8_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe4m3_to_f32_array) { int const kN = 27; using Source = cutlass::float_e4m3_t; const char source_name[] = "float_e4m3_t"; using Destination = float; const char dest_name[] = "float"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe5m2_to_f32_array) { int const kN = 27; using Source = cutlass::float_e5m2_t; const char source_name[] = "float_e5m2_t"; using Destination = float; const char dest_name[] = "float"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe4m3_to_f16_rn) { int const kN = 1; using Source = cutlass::float_e4m3_t; const char source_name[] = "float_e4m3_t"; using Destination = cutlass::half_t; const char dest_name[] = "half_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe4m3_to_f16_array) { int const kN = 27; using Source = cutlass::float_e4m3_t; const char source_name[] = "float_e4m3_t"; using Destination = cutlass::half_t; const char dest_name[] = "half_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe5m2_to_f16_rn) { int const kN = 1; using Source = cutlass::float_e5m2_t; const char source_name[] = "float_e5m2_t"; using Destination = cutlass::half_t; const char dest_name[] = "half_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe5m2_to_f16_array) { int const kN = 27; using Source = cutlass::float_e5m2_t; const char source_name[] = "float_e5m2_t"; using Destination = cutlass::half_t; const char dest_name[] = "half_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe4m3_to_bf16_rn) { int const kN = 1; using Source = cutlass::float_e4m3_t; const char source_name[] = "float_e4m3_t"; using Destination = cutlass::bfloat16_t; const char dest_name[] = "bfloat16_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe4m3_to_bf16_array) { int const kN = 27; using Source = cutlass::float_e4m3_t; const char source_name[] = "float_e4m3_t"; using Destination = cutlass::bfloat16_t; const char dest_name[] = "bfloat16_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe5m2_to_bf16_rn) { int const kN = 1; using Source = cutlass::float_e5m2_t; const char source_name[] = "float_e5m2_t"; using Destination = cutlass::bfloat16_t; const char dest_name[] = 
"bfloat16_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe5m2_to_bf16_array) { int const kN = 27; using Source = cutlass::float_e5m2_t; const char source_name[] = "float_e5m2_t"; using Destination = cutlass::bfloat16_t; const char dest_name[] = "bfloat16_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } // These are included as regression tests for a special case when N = 4. TEST(NumericConversion, int4b_t_to_fe5m2_t_array_4) { int const kN = 4; using Source = cutlass::int4b_t; const char source_name[] = "int4b_t"; using Destination = cutlass::float_e5m2_t; const char dest_name[] = "float_e5m2_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, int_to_fe4m3_t_array_4) { int const kN = 4; using Source = int; const char source_name[] = "int"; using Destination = cutlass::float_e4m3_t; const char dest_name[] = "float_e4m3_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, int2b_t_to_fe4m3_t_array_4) { int const kN = 4; using Source = cutlass::int2b_t; const char source_name[] = "int2b_t"; using Destination = cutlass::float_e4m3_t; const char dest_name[] = "float_e4m3_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, fe5m2_t_to_double_array_4) { int const kN = 4; using Source = cutlass::float_e5m2_t; const char source_name[] = "float_e5m2_t"; using Destination = double; const char dest_name[] = "double"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } TEST(NumericConversion, int_to_fe4m3_t_array_32) { int const kN = 32; using Source = int; const char source_name[] = "int"; using Destination = cutlass::float_e4m3_t; const char dest_name[] = "float_e4m3_t"; test::core::kernel::run_test<Destination, Source, kN>(dest_name, source_name); } ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> struct GetName { static constexpr char name[] = "UNSUPPORTED"; }; template <> struct GetName<cutlass::int4b_t> { static constexpr char name[] = "int4b_t"; }; template <> struct GetName<uint8_t> { static constexpr char name[] = "uint8_t"; }; template <> struct GetName<int8_t> { static constexpr char name[] = "int8_t"; }; template <> struct GetName<cutlass::float_e4m3_t> { static constexpr char name[] = "float_e4m3_t"; }; template <> struct GetName<cutlass::half_t> { static constexpr char name[] = "half_t"; }; template <> struct GetName<cutlass::bfloat16_t> { static constexpr char name[] = "bfloat16_t"; }; template <> struct GetName<float> { static constexpr char name[] = "float"; }; template <typename Result_, typename Source_> struct ResultSourcePair { using Result = Result_; using Source = Source_; }; template <typename ResultSourcePair> class VectorArrayConverterTest : public testing::Test { public: using Result = typename ResultSourcePair::Result; using Source = typename ResultSourcePair::Source; template <int N> static void emit_test() { const int range = 1 << cutlass::sizeof_bits<Source>::value; const int offset = cutlass::platform::numeric_limits<Source>::lowest(); test::core::kernel::run_test<Result, Source, N>(GetName<Result>::name, GetName<Source>::name, range, offset); } }; using VectorConvertTypes = ::testing::Types< ResultSourcePair<float, int8_t>, ResultSourcePair<float, uint8_t>, ResultSourcePair<cutlass::half_t, int8_t>, 
ResultSourcePair<cutlass::half_t, uint8_t>, ResultSourcePair<cutlass::bfloat16_t, uint8_t>, ResultSourcePair<cutlass::bfloat16_t, int8_t>, ResultSourcePair<cutlass::float_e4m3_t, cutlass::int4b_t>, ResultSourcePair<cutlass::half_t, cutlass::int4b_t>, ResultSourcePair<cutlass::bfloat16_t, cutlass::int4b_t>, ResultSourcePair<float, cutlass::int4b_t> >; TYPED_TEST_SUITE(VectorArrayConverterTest, VectorConvertTypes); TYPED_TEST(VectorArrayConverterTest, array_1) { TestFixture::template emit_test<1>(); } TYPED_TEST(VectorArrayConverterTest, array_2) { TestFixture::template emit_test<2>(); } TYPED_TEST(VectorArrayConverterTest, array_3) { TestFixture::template emit_test<3>(); } TYPED_TEST(VectorArrayConverterTest, array_4) { TestFixture::template emit_test<4>(); } TYPED_TEST(VectorArrayConverterTest, array_5) { TestFixture::template emit_test<5>(); } TYPED_TEST(VectorArrayConverterTest, array_8) { TestFixture::template emit_test<8>(); } TYPED_TEST(VectorArrayConverterTest, array_10) { // N > 8 and N is not a multiple of 4 TestFixture::template emit_test<10>(); } TYPED_TEST(VectorArrayConverterTest, array_12) { // N > 8 and N is a multiple of 4 TestFixture::template emit_test<12>(); } TYPED_TEST(VectorArrayConverterTest, array_16) { // N > 8 and N is a multiple of 8 TestFixture::template emit_test<16>(); } TYPED_TEST(VectorArrayConverterTest, array_17) { // N > 8 and N is not a multiple of 8 TestFixture::template emit_test<17>(); } TYPED_TEST(VectorArrayConverterTest, array_27) { // Test entire conversion range with residue (for int4) TestFixture::template emit_test<27>(); } TYPED_TEST(VectorArrayConverterTest, array_31) { // Force use of converters for 16, 8, 4, 2 and scalar // if max width is 16 TestFixture::template emit_test<31>(); } TYPED_TEST(VectorArrayConverterTest, array_63) { // Force use of converters for 32, 16, 8, 4, 2 and scalar // if max width is 32 TestFixture::template emit_test<63>(); } TYPED_TEST(VectorArrayConverterTest, array_256) { // Test entire conversion range (for int8) TestFixture::template emit_test<256>(); } TYPED_TEST(VectorArrayConverterTest, array_259) { // Force use of 4, 2 and scalar converter (if max width is 4) TestFixture::template emit_test<259>(); } TYPED_TEST(VectorArrayConverterTest, array_263) { // Force use of 8, 4, 2 and scalar converter (if max width is 8) TestFixture::template emit_test<263>(); } /////////////////////////////////////////////////////////////////////////////////////////////////
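// The odd array lengths exercised above (31, 63, 259, 263, ...) are chosen, per the comments on
// each test, so that an array converter whose widest vectorized path covers a fixed number of
// elements has to fall back through successively narrower widths for the leftover elements.
// Below is a minimal sketch of that greedy decomposition; it is not CUTLASS code, just a
// hypothetical standalone helper added here for illustration.

#include <vector>

// Greedy power-of-two decomposition of an array length, assuming the widest vector converter
// handles max_width elements. For example, decompose_widths(31, 16) yields {16, 8, 4, 2, 1},
// matching the "converters for 16, 8, 4, 2 and scalar" case noted in the array_31 test above;
// decompose_widths(263, 8) yields 32 chunks of 8 followed by {4, 2, 1}.
std::vector<int> decompose_widths(int n, int max_width) {
  std::vector<int> widths;
  for (int w = max_width; w >= 1; w /= 2) {
    while (n >= w) {
      widths.push_back(w);
      n -= w;
    }
  }
  return widths;
}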
// ==== End of file: cutlass/test/unit/core/numeric_conversion.cu (repo: cutlass) ====
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for thread-level GEMM */ #include "../../common/cutlass_unit_test.h" #include "cutlass/aligned_buffer.h" #include "cutlass/half.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/gemm/warp/mma_tensor_op_sm70.h" #include "cutlass/epilogue/warp/fragment_iterator_volta_tensor_op.h" #include "cutlass/epilogue/threadblock/default_thread_map_volta_tensor_op.h" #include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "testbed.h" ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_64x64_32x32x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_128x64_64x32x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, 
ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_64x128_32x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_64x64_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = 
cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_64x128_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); 
EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_128x64_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_128x128_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, 
ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_128x256_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 256, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, f16_volta_tensor_op_256x128_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<256, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = cutlass::half_t; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = cutlass::half_t; using ElementCompute = cutlass::half_t; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, 
cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementC>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// // // Mixed: F32 accumulation // ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, f16_f32_volta_tensor_op_64x64_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f16_f32_volta_tensor_op_128x256_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 256, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; 
using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f16_f32_volta_tensor_op_256x128_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<256, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f16_f32_volta_tensor_op_128x128_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 128, 4>; using WarpShape = 
cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f16_f32_volta_tensor_op_64x64_32x32x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = 
testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f16_f32_volta_tensor_op_64x128_32x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f16_f32_volta_tensor_op_128x64_64x32x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = 
typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// // // F32 accumulation, F32 output // ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_64x64_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = float; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_64x128_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = float; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 
128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_128x64_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = float; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_128x128_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = float; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using 
WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_128x256_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 256, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = float; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_256x128_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<256, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = float; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< 
cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_64x64_32x32x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = float; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_128x64_64x32x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = 
cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = float; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, f32_volta_tensor_op_64x128_32x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = float; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 128 / cutlass::sizeof_bits<ElementOutput>::value; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// // This works TEST(SM70_Epilogue_threadblock_epilogue, vec8_f16_f32_volta_tensor_op_64x64_32x32x4) { // // Define the warp-level matrix multiply // using Shape 
= cutlass::gemm::GemmShape<64, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 8; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } // This works TEST(SM70_Epilogue_threadblock_epilogue, vec2_f16_f32_volta_tensor_op_64x64_32x32x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 2; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); 
EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// // This fails TEST(SM70_Epilogue_threadblock_epilogue, vec1_f16_f32_volta_tensor_op_64x64_32x32x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<64, 64, 4>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 1; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM70_Epilogue_threadblock_epilogue, vec1_f32_volta_tensor_op_128x128_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = float; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 1; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, 
kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, vec1_f16_f32_volta_tensor_op_128x128_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 128, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 1; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } TEST(SM70_Epilogue_threadblock_epilogue, vec1_f16_f32_volta_tensor_op_128x256_64x64x4) { // // Define the warp-level matrix multiply // using Shape = cutlass::gemm::GemmShape<128, 256, 4>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 4>; using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementC = float; using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<cutlass::sizeof_bits<ElementA>::value>; using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<cutlass::sizeof_bits<ElementB>::value>; using ElementOutput = cutlass::half_t; using ElementAccumulator = ElementC; using ElementCompute = ElementC; using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< cutlass::gemm::GemmShape<16, 16, 4>, 32, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, cutlass::layout::RowMajor, cutlass::arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp< WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC, cutlass::layout::RowMajor, Policy >; int const kPartitionsK = 1; int const kElementsPerAccess = 1; using ThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp< Shape, WarpShape, kPartitionsK, ElementC, 
kElementsPerAccess, ElementAccumulator>::Type; // // Output operator // using OutputOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, kElementsPerAccess, ElementAccumulator, ElementCompute >; // // Define the epilogue // using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess >::Epilogue; // // Instantiate epilogue // EpilogueTestbed<Epilogue> testbed; bool passed = testbed.run_all(); EXPECT_TRUE(passed); } /////////////////////////////////////////////////////////////////////////////////////////////////
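// ---------------------------------------------------------------------------------------------
// Illustrative sketch only -- not part of the original CUTLASS test file. The TEST bodies above
// differ solely in threadblock shape, warp shape, output element type, and vector width, so the
// boilerplate could be driven through a hypothetical helper like the one below (untested; the
// helper name and its defaults are assumptions, everything else reuses types already
// instantiated verbatim in the tests above).
template <
  typename Shape,              // threadblock tile, e.g. cutlass::gemm::GemmShape<64, 64, 4>
  typename WarpShape,          // warp tile, e.g. cutlass::gemm::GemmShape<32, 32, 4>
  typename ElementOutput,      // cutlass::half_t or float in the tests above
  int kElementsPerAccess,      // output vector width
  int kPartitionsK = 1
>
bool run_volta_epilogue_case() {

  using ElementA = cutlass::half_t;
  using ElementB = cutlass::half_t;
  using ElementC = float;

  using LayoutA = cutlass::layout::ColumnMajorVoltaTensorOpMultiplicandCongruous<
      cutlass::sizeof_bits<ElementA>::value>;
  using LayoutB = cutlass::layout::RowMajorVoltaTensorOpMultiplicandBCongruous<
      cutlass::sizeof_bits<ElementB>::value>;

  // Warp-level Volta tensor op, identical to the Policy / WarpMmaTensorOp pairs used above.
  using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
      cutlass::arch::Mma<
          cutlass::gemm::GemmShape<16, 16, 4>, 32,
          ElementA, cutlass::layout::ColumnMajor,
          ElementB, cutlass::layout::RowMajor,
          ElementC, cutlass::layout::RowMajor,
          cutlass::arch::OpMultiplyAdd>,
      cutlass::MatrixShape<1, 1>>;

  using WarpMmaTensorOp = cutlass::gemm::warp::MmaVoltaTensorOp<
      WarpShape, ElementA, LayoutA, ElementB, LayoutB, ElementC,
      cutlass::layout::RowMajor, Policy>;

  // Output operator and default epilogue, exactly as in the individual TEST bodies.
  using OutputOp = cutlass::epilogue::thread::LinearCombination<
      ElementOutput, kElementsPerAccess, ElementC, ElementC>;

  using Epilogue = typename cutlass::epilogue::threadblock::DefaultEpilogueVoltaTensorOp<
      Shape, WarpMmaTensorOp, kPartitionsK, OutputOp, kElementsPerAccess>::Epilogue;

  EpilogueTestbed<Epilogue> testbed;
  return testbed.run_all();
}

// Example use (hypothetical), equivalent to the vec8 f16 test above:
//   EXPECT_TRUE((run_volta_epilogue_case<
//       cutlass::gemm::GemmShape<64, 64, 4>, cutlass::gemm::GemmShape<32, 32, 4>,
//       cutlass::half_t, 8>()));
// ---------------------------------------------------------------------------------------------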
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Tests for device-wide Rank 2k update interface */ #pragma once #include <iostream> #include <fstream> #include <sstream> #include "../../common/cutlass_unit_test.h" #include "cutlass/blas3.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/distribution.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/reference/host/error_metrics.h" #include "cutlass/util/reference/host/rank_2k.h" #include "cutlass/util/reference/host/rank_2k_complex.h" #include "testbed_utils.h" namespace test { namespace gemm { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Rank2K> struct TestbedRank2KUniversal { using ElementA = typename Rank2K::ElementA; using ElementB = typename Rank2K::ElementB; using ElementC = typename Rank2K::ElementC; using ElementAccumulator = typename Rank2K::ElementAccumulator; using ElementCompute = typename Rank2K::Rank2Kkernel::Epilogue::OutputOp::ElementCompute; /// Initialization cutlass::Distribution::Kind init_A; cutlass::Distribution::Kind init_B; cutlass::Distribution::Kind init_C; uint64_t seed; cutlass::HostTensor<typename Rank2K::ElementA, typename Rank2K::LayoutA> tensor_A; cutlass::HostTensor<typename Rank2K::ElementB, typename Rank2K::LayoutB> tensor_B; cutlass::HostTensor<typename Rank2K::ElementC, typename Rank2K::LayoutC> tensor_C; cutlass::HostTensor<typename Rank2K::ElementC, typename Rank2K::LayoutC> tensor_D; cutlass::HostTensor<typename Rank2K::ElementC, typename Rank2K::LayoutC> reference_D; // // Methods // TestbedRank2KUniversal( cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint64_t seed_ = 2080 ): init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } /// Helper to initialize a tensor view template <typename Element, typename Layout> bool initialize_tensor( cutlass::TensorView<Element, Layout> view, cutlass::Distribution::Kind dist_kind, uint64_t seed, int mantissa_in_bits) { if (dist_kind == cutlass::Distribution::Uniform) { double scope_max, scope_min; int bits_input = cutlass::sizeof_bits<Element>::value; int bits_output = cutlass::sizeof_bits<typename Rank2K::ElementC>::value; if (bits_input == 1) { scope_max = 2; scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = -2; } else if (bits_output == 16) { scope_max = 5; scope_min = -5; } else { scope_max = 8; scope_min = -8; } cutlass::reference::host::TensorFillRandomUniform( view, seed, scope_max, scope_min, mantissa_in_bits); } else if (dist_kind == cutlass::Distribution::Identity) { cutlass::reference::host::TensorFillIdentity(view); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5, mantissa_in_bits); } else if (dist_kind == cutlass::Distribution::Sequential) { cutlass::reference::host::BlockFillSequential( view.data(), view.capacity()); } else { EXPECT_TRUE(false) << "Input distribution not implemented"; return false; } return true; } /// Helper to initialize a tensor view template <typename Element, typename Layout> bool initialize_symmetric_tensor( cutlass::TensorView<Element, Layout> view, 
cutlass::Distribution::Kind dist_kind, uint64_t seed, int mantissa_in_bits) { if (dist_kind == cutlass::Distribution::Uniform) { double scope_max, scope_min; int bits_input = cutlass::sizeof_bits<Element>::value; int bits_output = cutlass::sizeof_bits<typename Rank2K::ElementC>::value; if (bits_input == 1) { scope_max = 2; scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = -2; } else if (bits_output == 16) { scope_max = 5; scope_min = -5; } else { scope_max = 8; scope_min = -8; } cutlass::reference::host::TensorFillSymmetricRandomUniform( view, seed, Rank2K::kFillModeC, scope_max, scope_min, mantissa_in_bits); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::host::TensorFillSymmetricRandomGaussian( view, seed, Rank2K::kFillModeC, 0, 0.5, mantissa_in_bits); } else { EXPECT_TRUE(false) << "Input distribution (symmetric tensor) not implemented"; return false; } return true; } /// Initializes data structures void initialize(cutlass::gemm::GemmCoord problem_size) { // // Allocate the Rank2K workspace // tensor_A.resize(problem_size.mk()); tensor_B.resize(problem_size.mk()); tensor_C.resize(problem_size.mn()); tensor_D.resize(problem_size.mn()); reference_D.resize(problem_size.mn(), false); EXPECT_TRUE(initialize_tensor(tensor_A.host_view(), init_A, seed + 2019, cutlass::MantissaInBits<typename Rank2K::ElementA>::bits)); EXPECT_TRUE(initialize_tensor(tensor_B.host_view(), init_B, seed + 2018, cutlass::MantissaInBits<typename Rank2K::ElementB>::bits)); EXPECT_TRUE(initialize_symmetric_tensor(tensor_C.host_view(), init_C, seed + 2017, cutlass::MantissaInBits<typename Rank2K::ElementC>::bits)); // It is possible to randomly initialize to all zeros, so override this with non-zeros // in the upper left corner of each operand. 
tensor_A.host_view().at({0, 0}) = typename Rank2K::ElementA(1); tensor_B.host_view().at({0, 0}) = typename Rank2K::ElementB(1); tensor_C.host_view().at({0, 0}) = typename Rank2K::ElementC(1); cutlass::reference::host::TensorCopy(reference_D.host_view(), tensor_C.host_view()); tensor_A.sync_device(); tensor_B.sync_device(); tensor_C.sync_device(); tensor_D.sync_device(); } /// Compares computed reference with device reference and outputs to a file if incorrect bool compare_reference( cutlass::gemm::GemmCoord problem_size, ElementCompute alpha, ElementCompute beta) { tensor_D.sync_host(); EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_A.host_view()), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_B.host_view()), 0); EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_C.host_view()), 0); if (tensor_D.size() > 1) EXPECT_GT(cutlass::reference::host::TensorNorm(tensor_D.host_view()), 0); if (reference_D.size() > 1) EXPECT_GT(cutlass::reference::host::TensorNorm(reference_D.host_view()), 0); double l2_norm = cutlass::reference::host::TensorRelativeErrorMetric(reference_D.host_view(), tensor_D.host_view()); bool passed = l2_norm < cutlass::MantissaInBits<typename Rank2K::ElementA>::error; return passed; } /// Verifies the result is a Rank2K bool verify( cutlass::gemm::GemmCoord problem_size, ElementCompute alpha, ElementCompute beta) { // // Verify // cutlass::reference::host::Rank2KComplex< typename Rank2K::ElementA, typename Rank2K::LayoutA, typename Rank2K::ElementB, typename Rank2K::LayoutB, typename Rank2K::ElementC, typename Rank2K::LayoutC, ElementCompute, ElementAccumulator >( problem_size, alpha, tensor_A.host_ref(), Rank2K::kTransformA, tensor_B.host_ref(), Rank2K::kTransformB, beta, tensor_C.host_ref(), reference_D.host_ref(), ElementAccumulator(0), Rank2K::kFillModeC, Rank2K::kBlasMode ); return compare_reference(problem_size, alpha, beta); } /// Returns true if the CUDA device is sufficient to execute the kernel. bool sufficient() const { // // Determine SMEM requirements and waive if not satisfied // size_t smem_size = sizeof(typename Rank2K::Rank2Kkernel::SharedStorage); cudaDeviceProp properties; int device_idx; cudaError_t result = cudaGetDevice(&device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDevice() API call failed."); } result = cudaGetDeviceProperties(&properties, device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDeviceProperties() failed"); } if (properties.sharedMemPerBlockOptin < smem_size) { return false; } return true; } /// Executes one test bool run( cutlass::gemm::GemmUniversalMode mode, cutlass::gemm::GemmCoord problem_size, int batch_count = 1, ElementCompute alpha = ElementCompute(1), ElementCompute beta = ElementCompute(0)) { // Waive test if insufficient CUDA device if (!sufficient()) { if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { std::cerr << "Test waived due to insufficient CUDA device." 
<< std::endl; } return true; } #if 0 std::cout << "[TestbedRank2KUniversal::run()] problem(m, n, k): " << problem_size << " alpha: " << ElementCompute(alpha) << " beta: " << ElementCompute(beta) << std::endl; #endif this->initialize(problem_size); // // Initialize the Rank2K operator // typename Rank2K::Arguments arguments{ mode, problem_size, batch_count, {alpha, beta}, tensor_A.device_data(), tensor_B.device_data(), tensor_C.device_data(), tensor_D.device_data(), problem_size.n() * problem_size.k(), problem_size.n() * problem_size.k(), problem_size.m() * problem_size.n(), problem_size.m() * problem_size.n(), tensor_A.layout().stride(0), tensor_B.layout().stride(0), tensor_C.layout().stride(0), tensor_D.layout().stride(0) }; Rank2K rank2k_op; size_t workspace_size = Rank2K::get_workspace_size(arguments); cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); cutlass::Status status = rank2k_op.initialize(arguments, workspace.get()); EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); // // Run the Rank2K // status = rank2k_op(); EXPECT_TRUE(status == cutlass::Status::kSuccess) << to_string(status); // // Verify // bool passed = this->verify(problem_size, alpha, beta); //if (true) { if (!passed) { std::stringstream fname; fname << "error_Rank2k_device_" << "fill_mode_c_" << (Rank2K::kFillModeC == cutlass::FillMode::kLower ? "lower_" : (Rank2K::kFillModeC == cutlass::FillMode::kUpper ? "upper_" : "invalid_")) << "mnk_" << problem_size.m() << "x" << problem_size.n() << "x" << problem_size.k() << "_" << Rank2K::ThreadblockShape::kM << "x" << Rank2K::ThreadblockShape::kN << "x" << Rank2K::ThreadblockShape::kK << "_" << Rank2K::WarpShape::kM << "x" << Rank2K::WarpShape::kN << "x" << Rank2K::WarpShape::kK << ".txt"; std::cout << fname.str() << std::endl; std::ofstream results(fname.str()); results << problem_size << std::endl; results << "\nA:\n" << tensor_A.host_view() << "\n" << "\nB:\n" << tensor_B.host_view() << "\n" << "\nC:\n" << tensor_C.host_view() << "\n" << "\nD reference:\n" << reference_D.host_view() << "\n" << "\nD computed:\n" << tensor_D.host_view() << "\n"; } return passed; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Rank2K> bool TestRank2kUniversal( cutlass::gemm::GemmCoord const & problem_size, cutlass::gemm::GemmUniversalMode mode, int batch_count, double alpha = 1.0, double beta = 2.0) { bool passed = true; TestbedRank2KUniversal<Rank2K> testbed; using ElementCompute = typename Rank2K::EpilogueOutputOp::ElementCompute; passed = testbed.run( mode, problem_size, batch_count, cutlass::from_real<ElementCompute>(alpha), cutlass::from_real<ElementCompute>(beta) ); return passed; } template <typename Rank2K> bool TestAllRank2KUniversal() { bool passed = true; int const kMinimumOperandElementSize = int(cutlass::sizeof_bits<typename Rank2K::ElementA>::value); int const kAlignment = cutlass::platform::is_same< typename Rank2K::OperatorClass, cutlass::arch::OpClassSimt>::value ? 1 : 128 / kMinimumOperandElementSize; // int8_t gemm alignment constraints int const kAlignmentM = cutlass::platform::is_same<typename Rank2K::OperatorClass, cutlass::arch::OpClassSimt>::value && cutlass::platform::is_same<typename Rank2K::ElementA, int8_t>::value && cutlass::platform::is_same<typename Rank2K::LayoutA, cutlass::layout::ColumnMajor>::value ? 
4 : kAlignment; int const kAlignmentN = kAlignmentM; int const kAlignmentK = cutlass::platform::is_same<typename Rank2K::OperatorClass, cutlass::arch::OpClassSimt>::value && cutlass::platform::is_same<typename Rank2K::ElementA, int8_t>::value && cutlass::platform::is_same<typename Rank2K::LayoutA, cutlass::layout::RowMajor>::value ? 4 : kAlignment; cutlass::gemm::GemmUniversalMode modes[] = { cutlass::gemm::GemmUniversalMode::kGemm, }; int problem_size_n[] = { kAlignmentN, 512 - 2*kAlignmentN }; int problem_size_k[] = { kAlignmentK, Rank2K::ThreadblockShape::kK * Rank2K::kStages - kAlignmentK, Rank2K::ThreadblockShape::kK * Rank2K::kStages * 3 - kAlignmentK }; int batch_counts[] = { // may be interpretted as batch count or split-K slices 1 // Just running one batch for now (removing 2, 3, 5, 7) }; double problem_alpha[] = { 1.0, 3.25 }; double problem_beta[] = { 0.0, 2.15 }; using ElementCompute = typename Rank2K::EpilogueOutputOp::ElementCompute; for (cutlass::gemm::GemmUniversalMode mode : modes) { for (int n : problem_size_n) { for (int k : problem_size_k) { for (int batch_count : batch_counts) { for (auto alpha : problem_alpha) { for (auto beta : problem_beta) { if (mode == cutlass::gemm::GemmUniversalMode::kGemm || mode == cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel) { // skip very small K problems //if (k / batch_count < 2 * Rank2K::ThreadblockShape::kK) { // continue; //} } cutlass::gemm::GemmCoord problem_size(n, n, k); TestbedRank2KUniversal<Rank2K> testbed; passed = testbed.run( mode, problem_size, batch_count, cutlass::from_real<ElementCompute>(alpha), cutlass::from_real<ElementCompute>(beta) ); if (!passed) { return false; } } } } } } } return passed; } template <typename Rank2K> bool TestAllRank2KHermitianUniversal() { bool passed = true; using ElementCompute = typename Rank2K::EpilogueOutputOp::ElementCompute; using ElementAccumulator = typename Rank2K::ElementAccumulator; int const kMinimumOperandElementSize = int(cutlass::sizeof_bits<typename Rank2K::ElementA>::value); int const kAlignment = cutlass::platform::is_same< typename Rank2K::OperatorClass, cutlass::arch::OpClassSimt>::value ? 1 : 128 / kMinimumOperandElementSize; // int8_t gemm alignment constraints int const kAlignmentM = cutlass::platform::is_same<typename Rank2K::OperatorClass, cutlass::arch::OpClassSimt>::value && cutlass::platform::is_same<typename Rank2K::ElementA, int8_t>::value && cutlass::platform::is_same<typename Rank2K::LayoutA, cutlass::layout::ColumnMajor>::value ? 4 : kAlignment; int const kAlignmentN = kAlignmentM; int const kAlignmentK = cutlass::platform::is_same<typename Rank2K::OperatorClass, cutlass::arch::OpClassSimt>::value && cutlass::platform::is_same<typename Rank2K::ElementA, int8_t>::value && cutlass::platform::is_same<typename Rank2K::LayoutA, cutlass::layout::RowMajor>::value ? 
4 : kAlignment; cutlass::gemm::GemmUniversalMode modes[] = { cutlass::gemm::GemmUniversalMode::kGemm, }; int problem_size_n[] = { kAlignmentN, 512 - 2*kAlignmentN }; int problem_size_k[] = { kAlignmentK, Rank2K::ThreadblockShape::kK * Rank2K::kStages - kAlignmentK, Rank2K::ThreadblockShape::kK * Rank2K::kStages * 3 - kAlignmentK }; int batch_counts[] = { // may be interpretted as batch count or split-K slices 1 // Just running one batch for now (removing 2, 3, 5, 7) }; /* Complex alpha for HER2K */ ElementAccumulator problem_alpha[] = { {1.0}, {1.25, 3.25}, {-0.25, -2.25} }; ElementAccumulator problem_beta[] = { 0.0, -2.25 }; for (cutlass::gemm::GemmUniversalMode mode : modes) { for (int n : problem_size_n) { for (int k : problem_size_k) { for (int batch_count : batch_counts) { for (auto alpha : problem_alpha) { for (auto beta : problem_beta) { if (mode == cutlass::gemm::GemmUniversalMode::kGemm || mode == cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel) { // skip very small K problems //if (k / batch_count < 2 * Rank2K::ThreadblockShape::kK) { // continue; //} } cutlass::gemm::GemmCoord problem_size(n, n, k); TestbedRank2KUniversal<Rank2K> testbed; passed = testbed.run( mode, problem_size, batch_count, alpha, beta ); if (!passed) { return false; } } } } } } } return passed; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace gemm } // namespace test /////////////////////////////////////////////////////////////////////////////////////////////////
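// ---------------------------------------------------------------------------------------------
// Illustrative sketch only -- not part of the original CUTLASS header. A device-level unit test
// that includes this testbed typically defines a fully specialized Rank2K kernel type and then
// drives it through the helpers above; the wrapper below shows the minimal shape of such a
// driver (the function name is hypothetical, and the concrete Rank2K type is assumed to be
// supplied by the caller).
template <typename Rank2K>
bool RunRank2kSmokeTest() {

  // One square problem (rank-2k updates require m == n), a single batch, and the default
  // alpha = 1 / beta = 2 scalars of TestRank2kUniversal().
  cutlass::gemm::GemmCoord problem_size(128, 128, 64);

  bool passed = test::gemm::device::TestRank2kUniversal<Rank2K>(
      problem_size,
      cutlass::gemm::GemmUniversalMode::kGemm,
      /*batch_count=*/1);

  // Then sweep the built-in problem-size / alpha / beta space as well.
  return passed && test::gemm::device::TestAllRank2KUniversal<Rank2K>();
}
// ---------------------------------------------------------------------------------------------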
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for thread-level GEMM */ #include "mma_pipelined_testbed.h" ///////////////////////////////////////////////////////////////////////////////////////////////// // sgemm_NT ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM50_sgemm, sgemm_nt_32x64x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, float, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, float, // ElementB, cutlass::layout::RowMajor, // LayoutB, float, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass, 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_sgemm, sgemm_nt_64x64x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, float, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, float, // ElementB, cutlass::layout::RowMajor, // LayoutB, float, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_sgemm, sgemm_nt_32x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, float, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, float, // ElementB, cutlass::layout::RowMajor, // LayoutB, float, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_sgemm, sgemm_nt_64x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, float, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, float, // ElementB, cutlass::layout::RowMajor, // LayoutB, float, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 128, 16); float 
alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_sgemm, sgemm_nt_128x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, float, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, float, // ElementB, cutlass::layout::RowMajor, // LayoutB, float, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } ///////////////////////////////////////////////////////////////////////////////////////////////// // dgemm_NN ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM50_dgemm, dgemm_nt_32x64x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, double, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, double, // ElementB, cutlass::layout::RowMajor, // LayoutB, double, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_dgemm, dgemm_nt_64x64x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, double, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, double, // ElementB, cutlass::layout::RowMajor, // LayoutB, double, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_dgemm, dgemm_nt_32x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, double, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, double, // ElementB, cutlass::layout::RowMajor, // LayoutB, double, // ElementC, 
cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_dgemm, dgemm_nt_64x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, double, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, double, // ElementB, cutlass::layout::RowMajor, // LayoutB, double, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 128, 16); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_dgemm, dgemm_nt_128x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, double, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, double, // ElementB, cutlass::layout::RowMajor, // LayoutB, double, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } ///////////////////////////////////////////////////////////////////////////////////////////////// // igemm_NN ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM50_igemm, igemm_nt_32x64x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, int, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_igemm, igemm_nt_64x64x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, 
cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, int, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_igemm, igemm_nt_32x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, int, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_igemm, igemm_nt_64x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, int, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 128, 16); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_igemm, igemm_nt_128x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, int, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } ///////////////////////////////////////////////////////////////////////////////////////////////// // hgemm_NN ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM50_hgemm, hgemm_nt_32x64x8_32x64x1) { using MmaCore = 
typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, cutlass::half_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, cutlass::half_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, cutlass::half_t, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_hgemm, hgemm_nt_64x64x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, cutlass::half_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, cutlass::half_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, cutlass::half_t, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_hgemm, hgemm_nt_32x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<32, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, cutlass::half_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, cutlass::half_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, cutlass::half_t, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(32, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM50_hgemm, hgemm_nt_64x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, cutlass::half_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, cutlass::half_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, cutlass::half_t, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 128, 16); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, 
cutlass::Distribution::Uniform); } TEST(SM50_hgemm, hgemm_nt_128x128x8_32x64x1) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 128, 8>, // ThreadblockShape, cutlass::gemm::GemmShape<32, 64, 8>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, // InstructionShape, cutlass::half_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, cutlass::half_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, cutlass::half_t, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 128, 48); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } ///////////////////////////////////////////////////////////////////////////////////////////////// // igemm_NT DP4A ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM61_igemm, igemm_int8_nt_64x64x16_64x64x4) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nt_64x64x32_64x64x4) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 32>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 32>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 4096); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nt_64x64x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 
32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nt_128x64x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 64, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 64, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nt_128x128x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 128, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 128, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nt_256x128x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<256, 256, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<128, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(256, 256, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nt_128x256x64_64x64x16) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 256, 64>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 64>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord 
problem_size(128, 256, 64); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nt_256x128x64_64x64x16) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<256, 128, 64>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 64>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::RowMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(256, 128, 64); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_64x64x16_64x64x4) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_64x64x32_64x64x4) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 32>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 32>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 4096); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_64x64x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; 
cutlass::gemm::GemmCoord problem_size(64, 64, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_128x64x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 64, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 64, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 2, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_128x128x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 128, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 128, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 4, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_256x128x16_64x64x8) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<256, 256, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<128, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(256, 256, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_128x256x64_64x64x16) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<128, 256, 64>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 64>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, 
cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(128, 256, 64); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_tn_256x128x64_64x64x16) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<256, 128, 64>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 64>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::RowMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(256, 128, 64); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 8, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); } TEST(SM61_igemm, igemm_int8_nn_64x64x16_64x64x4) { using MmaCore = typename cutlass::gemm::threadblock::DefaultMmaCore< cutlass::gemm::GemmShape<64, 64, 16>, // ThreadblockShape, cutlass::gemm::GemmShape<64, 64, 16>, // WarpShape, cutlass::gemm::GemmShape<1, 1, 4>, // InstructionShape, int8_t, // ElementA, cutlass::layout::ColumnMajor, // LayoutA, int8_t, // ElementB, cutlass::layout::ColumnMajor, // LayoutB, int, // ElementC, cutlass::layout::RowMajor, // LayoutC, cutlass::arch::OpClassSimt, // OpClass 2, // Stages, cutlass::arch::OpMultiplyAdd // Operator, >; cutlass::gemm::GemmCoord problem_size(64, 64, 32); float alpha = 1.f; float beta = 0.0f; dim3 grid(1, 1); dim3 block(32, 1, 1); test::gemm::threadblock::Testbed<MmaCore>( problem_size.m(), problem_size.n(), problem_size.k(), alpha, beta) .run(grid, block, cutlass::Distribution::Uniform, cutlass::Distribution::Uniform); }
cutlass/test/unit/gemm/threadblock/mma_pipelined_simt.cu/0
{ "file_path": "cutlass/test/unit/gemm/threadblock/mma_pipelined_simt.cu", "repo_id": "cutlass", "token_count": 25788 }
58
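// A minimal standalone sketch (not part of the test file above) making explicit the
// relationship these testbed launches follow between tile shapes and launch dimensions:
// block.x is the warp size and block.y is the number of warps covering the threadblock
// tile, i.e. (ThreadblockShape::M / WarpShape::M) * (ThreadblockShape::N / WarpShape::N).
// TileShape below is an illustrative stand-in for cutlass::gemm::GemmShape.

struct TileShape { int m, n, k; };

constexpr int warps_per_threadblock(TileShape tb, TileShape warp) {
  return (tb.m / warp.m) * (tb.n / warp.n);
}

// 128x128x8 threadblock tile with 32x64x8 warp tiles -> 8 warps, matching dim3 block(32, 8, 1).
static_assert(warps_per_threadblock({128, 128, 8}, {32, 64, 8}) == 8, "block(32, 8, 1)");

// 64x64x16 threadblock tile with a 64x64x16 warp tile (DP4A tests) -> 1 warp, block(32, 1, 1).
static_assert(warps_per_threadblock({64, 64, 16}, {64, 64, 16}) == 1, "block(32, 1, 1)");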
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
    \file
    \brief Unit tests for tensor layout
*/

#include "../common/cutlass_unit_test.h"

#include "cutlass/layout/tensor.h"
#include "cutlass/tensor_coord.h"

/////////////////////////////////////////////////////////////////////////////////////////////////

namespace test {
namespace layout {

void test_NHWC_layout(int n_size, int h_size, int w_size, int c_size) {

  int ldc = c_size + 1;
  int ldw = ldc * (w_size + 2);
  int ldh = ldw * (h_size + 3);

  cutlass::layout::TensorNHWC::Stride tensor_stride({ ldc, ldw, ldh });
  cutlass::layout::TensorNHWC tensor_nhwc(tensor_stride);

  // test pointer offset
  for (int n_idx = 0; n_idx < n_size; n_idx++) {
    for (int h_idx = 0; h_idx < h_size; h_idx++) {
      for (int w_idx = 0; w_idx < w_size; w_idx++) {
        for (int c_idx = 0; c_idx < c_size; c_idx++) {
          cutlass::Tensor4DCoord tensor_coord(n_idx, h_idx, w_idx, c_idx);
          auto ptr_offset = tensor_nhwc(tensor_coord);
          decltype(ptr_offset) reference_offset = c_idx + w_idx * ldc + h_idx * ldw + n_idx * ldh;
          EXPECT_EQ(ptr_offset, reference_offset);
        }
      }
    }
  }

  // test stride
  auto stride = tensor_nhwc.stride();
  EXPECT_EQ(stride, tensor_stride);

  // test capacity
  auto capacity = tensor_nhwc.capacity(cutlass::Tensor4DCoord(n_size, h_size, w_size, c_size));
  decltype(capacity) reference_capacity = ldh * n_size;
  EXPECT_EQ(capacity, reference_capacity);

  // test packed
  auto packed_tensor_layout = tensor_nhwc.packed(cutlass::Tensor4DCoord(n_size, h_size, w_size, c_size));
  auto packed_stride = packed_tensor_layout.stride();
  EXPECT_EQ(packed_stride, cutlass::layout::TensorNHWC::Stride({ c_size, w_size * c_size, h_size * w_size * c_size }));
}

void test_NCHW_layout(int n_size, int c_size, int h_size, int w_size) {

  int ldw = w_size + 1;
  int ldh = ldw * (h_size + 2);
  int ldc = ldh * (c_size + 1);

  cutlass::layout::TensorNCHW::Stride tensor_stride({ ldw, ldh, ldc });
  cutlass::layout::TensorNCHW tensor_nchw(tensor_stride);

  // test pointer offset
  for (int n_idx = 0; n_idx < n_size; n_idx++) {
    for (int c_idx = 0; c_idx < c_size; c_idx++) {
      for (int h_idx = 0; h_idx < h_size; h_idx++) {
        for (int w_idx = 0; w_idx < w_size; w_idx++) {
          // Tensor4DCoord is always created in nhwc order
          cutlass::Tensor4DCoord tensor_coord(n_idx, h_idx, w_idx, c_idx);
          auto ptr_offset = tensor_nchw(tensor_coord);
          decltype(ptr_offset) reference_offset = w_idx + h_idx * ldw + c_idx * ldh + n_idx * ldc;
          EXPECT_EQ(ptr_offset, reference_offset);
        }
      }
    }
  }

  // test stride
  auto stride = tensor_nchw.stride();
  EXPECT_EQ(stride, tensor_stride);

  // test capacity
  auto capacity = tensor_nchw.capacity(cutlass::Tensor4DCoord(n_size, h_size, w_size, c_size));
  decltype(capacity) reference_capacity = ldc * n_size;
  EXPECT_EQ(capacity, reference_capacity);

  // test packed
  auto packed_tensor_layout = tensor_nchw.packed(cutlass::Tensor4DCoord(n_size, h_size, w_size, c_size));
  auto packed_stride = packed_tensor_layout.stride();
  EXPECT_EQ(packed_stride, cutlass::layout::TensorNCHW::Stride({ w_size, w_size * h_size, w_size * h_size * c_size }));
}

} // namespace layout
} // namespace test

/////////////////////////////////////////////////////////////////////////////////////////////////

TEST(Layout_Tensor, NHWC_32_12_10_14) {
  int n_size = 32;
  int h_size = 12;
  int w_size = 10;
  int c_size = 14;
  test::layout::test_NHWC_layout(n_size, h_size, w_size, c_size);
}

/////////////////////////////////////////////////////////////////////////////////////////////////

TEST(Layout_Tensor, NCHW_32_12_10_14) {
  int n_size = 32;
  int c_size = 12;
  int h_size = 10;
  int w_size = 14;
  test::layout::test_NCHW_layout(n_size, c_size, h_size, w_size);
}

/////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/test/unit/layout/tensor.cu/0
{ "file_path": "cutlass/test/unit/layout/tensor.cu", "repo_id": "cutlass", "token_count": 2305 }
59
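// A standalone worked example (not part of the test above) of the packed NHWC offset
// formula that test_NHWC_layout checks: a packed N x H x W x C tensor has strides
// {C, W*C, H*W*C}, so coordinate (n, h, w, c) maps to c + w*C + h*W*C + n*H*W*C.
// The extents below are illustrative only.

constexpr int N = 2, H = 3, W = 4, C = 5;

constexpr int nhwc_packed_offset(int n, int h, int w, int c) {
  return c + w * C + h * (W * C) + n * (H * W * C);
}

// First element, last element, and total capacity of the packed tensor.
static_assert(nhwc_packed_offset(0, 0, 0, 0) == 0, "first element");
static_assert(nhwc_packed_offset(1, 2, 3, 4) == 119, "last element");
static_assert(N * H * W * C == 120, "capacity");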
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit test for the PipelineTmaAsync class */ #define KERNEL_DBG_TRACE false #include "../common/cutlass_unit_test.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cute/tensor.hpp> #include <cute/arch/cluster_sm90.hpp> #include <cutlass/util/reference/host/gemm.h> #include <cutlass/cluster_launch.hpp> #include "cutlass/core_io.h" #include "cutlass/util/print_error.hpp" #include "cutlass/util/GPU_Clock.hpp" #include "testbed.h" #include "cutlass/pipeline/pipeline.hpp" #include "cutlass/arch/barrier.h" #include "cute/arch/cluster_sm90.hpp" using namespace cute; //////////////////// KERNEL ///////////////////////// template <uint32_t Stages> struct SharedStorage { typename cutlass::PipelineTmaAsync<Stages>::SharedStorage storage; }; // Goal of this kernel is to complete deadlock-free template <class ClusterShape, uint32_t NumStages> __global__ static void pipeline_device(uint32_t const NumIterations) { extern __shared__ char shared_memory[]; using MainloopPipeline = cutlass::PipelineTmaAsync<NumStages>; using PipelineState = cutlass::PipelineState<NumStages>; using SharedStorage = SharedStorage<NumStages>; SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(shared_memory); [[maybe_unused]] auto cta_layout = Layout<ClusterShape>{}; // (m,n) -> cta_id int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0); int warp_group_thread_idx = threadIdx.x % 128; dim3 block_id_in_cluster = cute::block_id_in_cluster(); auto cluster_shape = ClusterShape{}; // #Producers = #RowsInCluster + #ColsInCluster - 1 uint32_t const NumProducers = cute::size<0>(cluster_shape) + cute::size<1>(cluster_shape) - 1; uint32_t const TmaTransactionBytes = sizeof(uint32_t) * NumProducers; uint32_t const per_cta_bytes = sizeof(uint32_t); // mbarrier.init typename MainloopPipeline::Params params; params.transaction_bytes = TmaTransactionBytes; params.role = MainloopPipeline::ThreadCategory::ProducerConsumer; params.is_leader = warp_group_thread_idx == 0; params.num_consumers = 128; MainloopPipeline pipeline(shared_storage.storage, params, cluster_shape); __syncthreads(); // Ensure All CTAs in Cluster have completed init before issuing commits cute::cluster_arrive_relaxed(); cute::cluster_wait(); // Total number of gemm_k_iterations auto mma_k_iterations = NumIterations; auto tma_k_iterations = NumIterations; PipelineState smem_pipe_read; // For the DMA (prologue) - we start with an opposite phase - since we skip all waits // i.e., we know that the buffer is indeed empty PipelineState smem_pipe_write = cutlass::make_producer_start_state<MainloopPipeline>(); PipelineState smem_pipe_release; int K_TILE_MMAS = 1; int lane_predicate = cute::elect_one_sync(); int k_pipe_tma_prologue = min(NumStages, tma_k_iterations); // DMA Prologue (Loads) CUTLASS_PRAGMA_UNROLL for(int i = 0; i < k_pipe_tma_prologue; ++i) { pipeline.producer_acquire(smem_pipe_write); // cp.async.bulk.tensor would typically happen here pipeline.producer_commit(smem_pipe_write, per_cta_bytes); ++smem_pipe_write; } tma_k_iterations -= k_pipe_tma_prologue; // MMA Prologue (Compute) - modeling inflight MMAs for (int iter = 0; iter < K_TILE_MMAS; ++iter) { pipeline.consumer_wait(smem_pipe_read); warpgroup_arrive(); // GMMA would typically happen here ++smem_pipe_read; } mma_k_iterations -= K_TILE_MMAS; CUTLASS_PRAGMA_NO_UNROLL for (int iter = 0; iter < mma_k_iterations; ++iter) { pipeline.consumer_wait(smem_pipe_read); warpgroup_arrive(); // GMMA would typically happen here 
pipeline.consumer_release(smem_pipe_release); if (lane_predicate && (warp_idx == 0) && (tma_k_iterations > 0)) { pipeline.producer_acquire(smem_pipe_write); // cp.async.bulk.tensor would typically happen here pipeline.producer_commit(smem_pipe_write, per_cta_bytes); ++smem_pipe_write; --tma_k_iterations; } // next read stage ++smem_pipe_read; ++smem_pipe_release; } // To make sure remote SMEM doesn't get destoryed cute::cluster_arrive(); cute::cluster_wait(); } ///////////////////////////////////////////////////// /// Device NT GMMA + TMA specialized template<uint32_t Stages_, typename ClusterShape_> struct PipelineTest { // // Data members // static constexpr uint32_t Stages = Stages_; static constexpr uint32_t kBlockSize = 128; using ClusterShape = ClusterShape_; // // Methods // // Ctor PipelineTest(){}; // Run CuTe GEMM kernel cudaError_t run(uint32_t const kNumIters, cudaStream_t stream = 0) { float elapsed_ms = 0.0f; // Pipeline (multistage pipeline) [[maybe_unused]] auto num_stages = Int<Stages>{}; auto cluster_shape = Shape<Int<ClusterShape::kM>, Int<ClusterShape::kN>, _1>{}; // // Configure and launch // int iterations = 1; cudaEvent_t events[2]; cudaError_t result; for (cudaEvent_t & event : events) { result = cudaEventCreate(&event); if (result != cudaSuccess) { std::cerr << "Error: Failed to create event."; return result; } } result = cudaEventRecord(events[0]); if (result != cudaSuccess) { std::cerr << "Error: Failed to record start event."; return result; } for (int iter = 0; iter < iterations; ++iter) { int smem_size = int(sizeof(SharedStorage<Stages>)); result = cudaFuncSetAttribute( pipeline_device<decltype(cluster_shape), Stages>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); // Launch a single Cluster, with 128 thread per CTA dim3 dimCluster(size<0>(cluster_shape), size<1>(cluster_shape), 1); dim3 dimGrid(size<0>(cluster_shape), size<1>(cluster_shape), 1); dim3 dimBlock(kBlockSize,1,1); const void* kernel = (const void*)pipeline_device<decltype(cluster_shape), Stages>; int iters = kNumIters; void* kernel_params[] = {reinterpret_cast<void*>(&iters)}; cutlass::ClusterLauncher::launch(dimGrid, dimCluster, dimBlock, smem_size, stream, kernel, kernel_params); } // profiling loop ends result = cudaEventRecord(events[1]); if (result != cudaSuccess) { std::cerr << "Error: Failed to record stop event."; return result; } result = cudaDeviceSynchronize(); if (result != cudaSuccess) { std::cerr << "Error: cudaDeviceSynchronize() failed" << std::endl; return result; } result = cudaEventElapsedTime(&elapsed_ms, events[0], events[1]); if (result != cudaSuccess) { std::cerr << "Failed to create event."; return result; } for (cudaEvent_t & event : events) { (void)cudaEventDestroy(event); } return cudaSuccess; } }; #if CUDA_12_0_SM90_FEATURES_SUPPORTED TEST(SM90_Verify_PipelineTmaAsync, Cluster1x1_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster1x1_Stage5) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; static constexpr uint32_t Stages = 5; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster1x1_Stage10) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 1, 1>; static constexpr uint32_t Stages = 
10; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster2x2_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster2x2_Stage5) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; static constexpr uint32_t Stages = 5; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster2x2_Stage10) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 2, 1>; static constexpr uint32_t Stages = 10; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster4x4_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster4x4_Stage10) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 4, 1>; static constexpr uint32_t Stages = 10; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster1x2_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster1x2_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster1x2_Stage10) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 2, 1>; static constexpr uint32_t Stages = 10; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster2x1_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster2x1_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 1, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster4x1_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster4x1_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 1, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); 
EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster1x4_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster1x4_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<1, 4, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster2x4_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster2x4_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<2, 4, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster4x2_Stage2) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>; static constexpr uint32_t Stages = 2; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } TEST(SM90_Verify_PipelineTmaAsync, Cluster4x2_Stage7) { Options options; using ClusterShape = cutlass::gemm::GemmShape<4, 2, 1>; static constexpr uint32_t Stages = 7; using Test = PipelineTest<Stages, ClusterShape>; Testbed<Test> testbed(options); EXPECT_TRUE(testbed.verification()); } #endif
cutlass/test/unit/pipeline/pipeline_tma_async.cu/0
{ "file_path": "cutlass/test/unit/pipeline/pipeline_tma_async.cu", "repo_id": "cutlass", "token_count": 5494 }
60
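// A standalone sketch (not part of the test above) spelling out the producer-count and
// transaction-byte arithmetic used by pipeline_device: the kernel sets
// NumProducers = rows + cols - 1 for an M x N cluster and expects one uint32_t of
// transaction bytes per producer.
#include <cstdint>

constexpr uint32_t num_producers(uint32_t cluster_m, uint32_t cluster_n) {
  return cluster_m + cluster_n - 1;
}

constexpr uint32_t tma_transaction_bytes(uint32_t cluster_m, uint32_t cluster_n) {
  return sizeof(uint32_t) * num_producers(cluster_m, cluster_n);
}

// Cluster2x2 tests: 2 + 2 - 1 = 3 producers, 12 bytes per transaction.
static_assert(num_producers(2, 2) == 3, "Cluster2x2 producers");
static_assert(tma_transaction_bytes(2, 2) == 12, "Cluster2x2 bytes");

// Cluster4x4 tests: 4 + 4 - 1 = 7 producers, 28 bytes per transaction.
static_assert(num_producers(4, 4) == 7, "Cluster4x4 producers");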
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Manifest of CUTLASS Library This is the root of the data structure containing CUTLASS objects */ #pragma once #include <list> #include <memory> #include <map> /////////////////////////////////////////////////////////////////////////////////////////////////// #include "library.h" /////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace library { /////////////////////////////////////////////////////////////////////////////////////////////////// // Forward declaration class Manifest; // init and insert all cutlass gemm operations in manifest object (procedurally generated using generator.py) void initialize_all(Manifest &manifest); // init and insert all reduction op in manifest object (manually instantiated in library/reduction) void initialize_all_reduction_op(Manifest &manifest); ///////////////////////////////////////////////////////////////////////////////////////////////////////// /// List of operations using OperationVector = std::vector<std::unique_ptr<Operation>>; /////////////////////////////////////////////////////////////////////////////////////////////////// /// Manifest of CUTLASS Library class Manifest { private: /// Operation provider Provider provider_; /// Global list of operations OperationVector operations_; public: Manifest (Provider provider = library::Provider::kCUTLASS) : provider_(provider) { } /// Top-level initialization Status initialize(); /// Used for initialization void reserve(size_t operation_count); /// Graceful shutdown Status release(); /// Appends an operation and takes ownership void append(Operation *operation_ptr) {\ // This function is inline s.t. 
it is present in generated libraries // without having to compile or link in manifest.cpp operations_.emplace_back(operation_ptr); } /// Returns an iterator to the first operation OperationVector const &operations() const; /// Returns a const iterator OperationVector::const_iterator begin() const; /// Returns a const iterator OperationVector::const_iterator end() const; }; /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace library } // namespace cutlass ///////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/tools/library/include/cutlass/library/manifest.h/0
{ "file_path": "cutlass/tools/library/include/cutlass/library/manifest.h", "repo_id": "cutlass", "token_count": 964 }
61
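// A minimal usage sketch for the Manifest declared above (not part of the library
// sources). It assumes the generated library providing initialize_all() is linked in,
// and that each Operation exposes description().name as declared in
// cutlass/library/library.h.
#include <iostream>

#include "cutlass/library/manifest.h"

int main() {
  cutlass::library::Manifest manifest;

  // Populate with the procedurally generated operations.
  cutlass::library::initialize_all(manifest);

  std::cout << "Loaded " << manifest.operations().size() << " operations" << std::endl;

  // Manifest::begin()/end() return OperationVector::const_iterator, so range-for works.
  for (auto const &operation : manifest) {
    std::cout << "  " << operation->description().name << std::endl;
  }

  return 0;
}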
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief */ #include <string> #include <stdexcept> #include <sstream> #include "cutlass/library/util.h" #include "cutlass/profiler/problem_space.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> static T lexical_cast(std::string const &str) { std::stringstream ss; T value; ss << str; ss >> value; return value; } ///////////////////////////////////////////////////////////////////////////////////////////////// std::ostream & KernelArgument::ValueIterator::print(std::ostream &out) const { out << "[" << (void *)this << " " << argument->qualified_name() << "] "; if (this->null_argument) { out << "<null>"; } else { out << "<not null>"; } return out; } KernelArgument::~KernelArgument() { } ////////////////////////////////////////////////////////////////////////////////////////////////// ScalarArgument::ScalarValue::ScalarValue( std::string const &value_, ScalarArgument const *argument_, bool not_null_ ): KernelArgument::Value(argument_, not_null_), value(value_) { } std::ostream &ScalarArgument::ScalarValue::print(std::ostream &out) const { out << argument->qualified_name() << ": "; if (not_null) { out << value; } else { out << "<null>"; } return out; } ScalarArgument::ScalarValueIterator::ScalarValueIterator( ScalarArgument const *argument_ ): KernelArgument::ValueIterator(argument_) { if (argument_) { value_it = argument_->values.begin(); } } void ScalarArgument::ScalarValueIterator::operator++() { if (this->null_argument) { this->null_argument = false; } else { ++value_it; } } bool 
ScalarArgument::ScalarValueIterator::operator==(ValueIterator const &it) const { if (it.type() != ArgumentTypeID::kScalar) { throw std::runtime_error("Cannot compare ScalarValueIterator with iterator of different type"); } auto const & scalar_it = static_cast<ScalarValueIterator const &>(it); return value_it == scalar_it.value_it; } /// Gets the value pointed to std::unique_ptr<KernelArgument::Value> ScalarArgument::ScalarValueIterator::at() const { if (this->null_argument) { return std::unique_ptr<KernelArgument::Value>( new ScalarArgument::ScalarValue( std::string(), static_cast<ScalarArgument const *>(argument), false)); } else { return std::unique_ptr<KernelArgument::Value>( new ScalarArgument::ScalarValue( *value_it, static_cast<ScalarArgument const *>(argument))); } } std::unique_ptr<KernelArgument::ValueIterator> ScalarArgument::begin() const { return std::unique_ptr<KernelArgument::ValueIterator>(new ScalarValueIterator(this)); } std::unique_ptr<KernelArgument::ValueIterator> ScalarArgument::end() const { ScalarValueIterator *it = new ScalarValueIterator(this); it->value_it = this->values.end(); it->null_argument = false; return std::unique_ptr<ValueIterator>(it); } ////////////////////////////////////////////////////////////////////////////////////////////////// IntegerArgument::IntegerValue::IntegerValue( int64_t value_, IntegerArgument const *argument_, bool not_null_ ): KernelArgument::Value(argument_, not_null_), value(value_) { } /// Pretty printer for debugging std::ostream &IntegerArgument::IntegerValue::print(std::ostream &out) const { out << argument->qualified_name() << ": "; if (not_null) { out << value; } else { out << "<null>"; } return out; } IntegerArgument::IntegerValueIterator::IntegerValueIterator(IntegerArgument const *argument_): KernelArgument::ValueIterator(argument_) { if (argument_) { range_it = argument_->ranges.begin(); if (range_it != argument_->ranges.end()) { value_it = range_it->begin(); } } } void IntegerArgument::IntegerValueIterator::operator++() { if (this->null_argument) { this->null_argument = false; } else { ++value_it; if (value_it == range_it->end()) { ++range_it; if (range_it != static_cast<IntegerArgument const *>(argument)->ranges.end()) { value_it = range_it->begin(); } } } } bool IntegerArgument::IntegerValueIterator::operator==(ValueIterator const &it) const { if (it.type() != ArgumentTypeID::kInteger) { throw std::runtime_error("Cannot compare IntegerValueIterator with iterator of different type"); } auto const & integer_iterator = static_cast<IntegerValueIterator const &>(it); if (this->null_argument) { return it.null_argument; } else { if (range_it != integer_iterator.range_it) { return false; } if (range_it == static_cast<IntegerArgument const *>(argument)->ranges.end() && range_it == integer_iterator.range_it) { return true; } return value_it == integer_iterator.value_it; } } std::unique_ptr<KernelArgument::Value> IntegerArgument::IntegerValueIterator::at() const { if (this->null_argument) { return std::unique_ptr<KernelArgument::Value>( new IntegerArgument::IntegerValue( 0, static_cast<IntegerArgument const *>(argument), false)); } else { return std::unique_ptr<KernelArgument::Value>( new IntegerArgument::IntegerValue( *value_it, static_cast<IntegerArgument const *>(argument))); } } std::unique_ptr<KernelArgument::ValueIterator> IntegerArgument::begin() const { return std::unique_ptr<KernelArgument::ValueIterator>(new IntegerValueIterator(this)); } std::unique_ptr<KernelArgument::ValueIterator> IntegerArgument::end() const { 
IntegerValueIterator *it = new IntegerValueIterator(this); it->range_it = this->ranges.end(); it->null_argument = false; return std::unique_ptr<ValueIterator>(it); } ////////////////////////////////////////////////////////////////////////////////////////////////// TensorArgument::TensorValue::TensorValue( TensorDescription const &desc_, TensorArgument const *argument_, bool not_null_ ): KernelArgument::Value(argument_, not_null_), desc(desc_) { } /// Pretty printer for debugging std::ostream &TensorArgument::TensorValue::print(std::ostream &out) const { out << argument->qualified_name() << ": " << to_string(desc.element) << ": " << to_string(desc.layout); return out; } TensorArgument::TensorValueIterator::TensorValueIterator( TensorArgument const *argument_ ): KernelArgument::ValueIterator(argument_) { if (argument_) { value_it = argument_->values.begin(); } } void TensorArgument::TensorValueIterator::operator++() { if (this->null_argument) { this->null_argument = false; } else { ++value_it; } } bool TensorArgument::TensorValueIterator::operator==(ValueIterator const &it) const { if (it.type() != ArgumentTypeID::kTensor) { throw std::runtime_error("Cannot compare TensorValueIterator with iterator of different type"); } auto const & tensor_it = static_cast<TensorValueIterator const &>(it); return value_it == tensor_it.value_it; } /// Gets the value pointed to std::unique_ptr<KernelArgument::Value> TensorArgument::TensorValueIterator::at() const { if (this->null_argument) { return std::unique_ptr<KernelArgument::Value>( new TensorArgument::TensorValue( TensorDescription(), static_cast<TensorArgument const *>(argument), false)); } else { return std::unique_ptr<KernelArgument::Value>( new TensorArgument::TensorValue( *value_it, static_cast<TensorArgument const *>(argument))); } } std::unique_ptr<KernelArgument::ValueIterator> TensorArgument::begin() const { return std::unique_ptr<KernelArgument::ValueIterator>(new TensorValueIterator(this)); } std::unique_ptr<KernelArgument::ValueIterator> TensorArgument::end() const { TensorValueIterator *it = new TensorValueIterator(this); it->value_it = this->values.end(); it->null_argument = false; return std::unique_ptr<ValueIterator>(it); } ////////////////////////////////////////////////////////////////////////////////////////////////// EnumeratedTypeArgument::EnumeratedTypeValue::EnumeratedTypeValue( std::string const & element_, EnumeratedTypeArgument const *argument_, bool not_null_ ): KernelArgument::Value(argument_, not_null_), element(element_) { } /// Pretty printer for debugging std::ostream &EnumeratedTypeArgument::EnumeratedTypeValue::print(std::ostream &out) const { out << argument->qualified_name() << ": " << element; return out; } EnumeratedTypeArgument::EnumeratedTypeValueIterator::EnumeratedTypeValueIterator( EnumeratedTypeArgument const *argument_ ): KernelArgument::ValueIterator(argument_) { if (argument_) { value_it = argument_->values.begin(); } } void EnumeratedTypeArgument::EnumeratedTypeValueIterator::operator++() { if (this->null_argument) { this->null_argument = false; } else { ++value_it; } } bool EnumeratedTypeArgument::EnumeratedTypeValueIterator::operator==(ValueIterator const &it) const { if (it.type() != ArgumentTypeID::kEnumerated) { throw std::runtime_error("Cannot compare EnumeratedTypeValueIterator with iterator of different type"); } auto const & enumerated_type_it = static_cast<EnumeratedTypeValueIterator const &>(it); return value_it == enumerated_type_it.value_it; } /// Gets the value pointed to 
std::unique_ptr<KernelArgument::Value> EnumeratedTypeArgument::EnumeratedTypeValueIterator::at() const { if (this->null_argument) { return std::unique_ptr<KernelArgument::Value>( new EnumeratedTypeValue( std::string(), static_cast<EnumeratedTypeArgument const *>(argument), false)); } else { return std::unique_ptr<KernelArgument::Value>( new EnumeratedTypeValue( *value_it, static_cast<EnumeratedTypeArgument const *>(argument))); } } std::unique_ptr<KernelArgument::ValueIterator> EnumeratedTypeArgument::begin() const { return std::unique_ptr<KernelArgument::ValueIterator>(new EnumeratedTypeValueIterator(this)); } std::unique_ptr<KernelArgument::ValueIterator> EnumeratedTypeArgument::end() const { EnumeratedTypeValueIterator *it = new EnumeratedTypeValueIterator(this); it->value_it = this->values.end(); it->null_argument = false; return std::unique_ptr<ValueIterator>(it); } ////////////////////////////////////////////////////////////////////////////////////////////////// ProblemSpace::Iterator::Iterator() { } ProblemSpace::Iterator::Iterator(ProblemSpace const &problem_space) { for (auto const & arg_ptr : problem_space.arguments) { construct_(arg_ptr.get()); } } ProblemSpace::Iterator::Iterator(Iterator && it) { iterators = std::move(it.iterators); } /// Helper for recursively constructing iterators void ProblemSpace::Iterator::construct_(KernelArgument const *argument) { iterators.emplace_back(argument->begin()); } /// Given a set of ranges, iterate over the points within their Cartesian product. No big deal. void ProblemSpace::Iterator::operator++() { // Define a pair of iterator into the vector of iterators. IteratorVector::iterator iterator_it = iterators.begin(); IteratorVector::iterator next_iterator = iterator_it; // Advance the first argument. ++(**iterator_it); // Maintain a pair of iterators over consecutive arguments. ++next_iterator; // Carry logic while (next_iterator != iterators.end() && **iterator_it == *((*iterator_it)->argument->end())) { // Did an iterator reach the end of its range? (*iterator_it) = (*iterator_it)->argument->begin(); // Reset that iterator, ++(**next_iterator); // and increment the next argument's iterator. iterator_it = next_iterator; // Advance to the next argument ++next_iterator; } } /// Moves iterator to end void ProblemSpace::Iterator::move_to_end() { if (!iterators.empty()) { std::unique_ptr<KernelArgument::ValueIterator> new_iter = iterators.back()->argument->end(); std::swap(iterators.back(), new_iter); } } ProblemSpace::Problem ProblemSpace::Iterator::at() const { Problem problem; for (std::unique_ptr<KernelArgument::ValueIterator> const & it : iterators) { problem.emplace_back(it->at()); } return problem; } /// Equality operator bool ProblemSpace::Iterator::operator==(Iterator const &it) const { // This would be an opportunity for auto, but explicitly denoting references to // owning smart pointers to dynamic polymorphic objects seems like a kindness to the reader. 
IteratorVector::const_iterator first_it = iterators.begin(); IteratorVector::const_iterator second_it = it.iterators.begin(); int idx = 0; for (; first_it != iterators.end(); ++first_it, ++second_it, ++idx) { KernelArgument::ValueIterator const *my_it = first_it->get(); KernelArgument::ValueIterator const *their_it = second_it->get(); if (*my_it != *their_it) { return false; } } return true; } std::ostream &ProblemSpace::Iterator::print(std::ostream &out) const { for (std::unique_ptr<KernelArgument::ValueIterator> const & iter_ptr : iterators) { out << " [iter " << (iter_ptr->null_argument ? "null" : "<not null>") << ", type: " << to_string(iter_ptr->argument->description->type) << "]" << std::endl; } return out; } ///////////////////////////////////////////////////////////////////////////////////////////////// ProblemSpace::ProblemSpace(ArgumentDescriptionVector const &schema, CommandLine const &cmdline) { // Clone the arguments for (ArgumentDescription const & arg_desc : schema) { clone_(arguments, &arg_desc); } // Parse values from the command line for (auto & arg : arguments) { parse_(arg.get(), cmdline); } } /// Returns the index of an argument by name size_t ProblemSpace::argument_index(char const *name) const { return argument_index_map.at(name); } /// Helper for recursively cloning void ProblemSpace::clone_( KernelArgumentVector &kernel_args, ArgumentDescription const *arg_desc) { KernelArgument *kernel_arg = nullptr; switch (arg_desc->type) { case ArgumentTypeID::kScalar: kernel_arg = new ScalarArgument(arg_desc); break; case ArgumentTypeID::kInteger: kernel_arg = new IntegerArgument(arg_desc); break; case ArgumentTypeID::kTensor: kernel_arg = new TensorArgument(arg_desc); break; case ArgumentTypeID::kStructure: { throw std::runtime_error("ArgumentTypeID::kStructure not supported"); } break; case ArgumentTypeID::kEnumerated: kernel_arg = new EnumeratedTypeArgument(arg_desc); break; default: break; } if (kernel_arg) { size_t idx = kernel_args.size(); for (auto const &alias : arg_desc->aliases) { argument_index_map.insert(std::make_pair(alias, idx)); } kernel_args.emplace_back(kernel_arg); } } /// Parses a command line void ProblemSpace::parse_(KernelArgument *arg, CommandLine const &cmdline) { switch (arg->description->type) { case ArgumentTypeID::kScalar: { auto * scalar = static_cast<ScalarArgument *>(arg); for (auto const &alias : arg->description->aliases) { if (cmdline.check_cmd_line_flag(alias.c_str())) { std::vector<std::vector<std::string>> tokens; cmdline.get_cmd_line_argument_ranges(alias.c_str(), tokens); for (auto const & vec : tokens) { if (!vec.empty()) { scalar->values.push_back(vec.front()); } } break; } } } break; case ArgumentTypeID::kInteger: { auto *integer = static_cast<IntegerArgument *>(arg); for (auto const &alias : arg->description->aliases) { if (cmdline.check_cmd_line_flag(alias.c_str())) { std::vector<std::vector<std::string> > tokens; cmdline.get_cmd_line_argument_ranges(alias.c_str(), tokens); for (auto &range_tokens : tokens) { if (!range_tokens.empty()) { Range range; if (range_tokens.front() == "rand") { range.mode = Range::Mode::kRandom; } else if (range_tokens.front() == "randlg2") { range.mode = Range::Mode::kRandomLog2; } switch (range.mode) { case Range::Mode::kSequence: { range.first = lexical_cast<int64_t>(range_tokens.front()); if (range_tokens.size() > 1) { range.last = lexical_cast<int64_t>(range_tokens.at(1)); } else { range.last = range.first; } if (range_tokens.size() > 2) { range.increment = lexical_cast<int64_t>(range_tokens.at(2)); } 
else { range.increment = 1; } } break; case Range::Mode::kRandom: // fall-through case Range::Mode::kRandomLog2: { if (range_tokens.size() < 4) { throw std::runtime_error( "Range of mode 'rand' must have four tokens showing " "the minimum, maximum, and number of iterations. For example, " "rand:16:128:1000"); } range.minimum = lexical_cast<int64_t>(range_tokens.at(1)); range.maximum = lexical_cast<int64_t>(range_tokens.at(2)); range.first = 1; range.last = lexical_cast<int64_t>(range_tokens.at(3)); range.increment = 1; if (range_tokens.size() > 4) { range.divisible = lexical_cast<int64_t>(range_tokens.at(4)); } } break; default: throw std::runtime_error("Unsupported range mode."); break; } integer->ranges.push_back(range); } } break; } } } break; case ArgumentTypeID::kTensor: { auto *tensor = static_cast<TensorArgument *>(arg); for (auto const &alias : arg->description->aliases) { if (cmdline.check_cmd_line_flag(alias.c_str())) { std::vector<std::vector<std::string>> tokens; cmdline.get_cmd_line_argument_ranges(alias.c_str(), tokens); for (auto const & tensor_tokens : tokens) { if (!tensor_tokens.empty()) { TensorArgument::TensorDescription tensor_desc; tensor_desc.element = cutlass::library::from_string<library::NumericTypeID>(tensor_tokens.front()); // Layout if (tensor_tokens.size() > 1) { tensor_desc.layout = cutlass::library::from_string<library::LayoutTypeID>(tensor_tokens.at(1)); } // Stride for (size_t i = 2; i < tensor_tokens.size(); ++i) { tensor_desc.stride.push_back(lexical_cast<int>(tensor_tokens.at(i))); } tensor->values.push_back(tensor_desc); } } break; } } } break; case ArgumentTypeID::kStructure: { throw std::runtime_error("Structure arguments not supported"); } break; case ArgumentTypeID::kEnumerated: { auto *enumerated_type = static_cast<EnumeratedTypeArgument *>(arg); for (auto const &alias : arg->description->aliases) { if (cmdline.check_cmd_line_flag(alias.c_str())) { std::vector<std::string> tokens; cmdline.get_cmd_line_arguments(alias.c_str(), tokens); for (auto const & token : tokens) { enumerated_type->values.push_back(token); } break; } } } break; default: break; } } ///////////////////////////////////////////////////////////////////////////////////////////////// ProblemSpace::Iterator ProblemSpace::begin() const { return ProblemSpace::Iterator(*this); } ProblemSpace::Iterator ProblemSpace::end() const { ProblemSpace::Iterator it(*this); it.move_to_end(); return it; } /// Gets all argument names as an ordered vector std::vector<std::string> ProblemSpace::argument_names() const { Problem problem = this->begin().at(); std::vector<std::string> names; names.reserve(problem.size()); for (auto const & arg : problem) { names.push_back(arg->argument->description->aliases.front()); } return names; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_int(int64_t &int_value, KernelArgument::Value const *value_ptr) { if (value_ptr->not_null) { if (value_ptr->argument->description->type == ArgumentTypeID::kInteger) { int_value = static_cast<IntegerArgument::IntegerValue const *>(value_ptr)->value; } else if (value_ptr->argument->description->type == ArgumentTypeID::kScalar) { std::stringstream ss; ss << static_cast<ScalarArgument::ScalarValue const *>(value_ptr)->value; ss >> int_value; } else { throw std::runtime_error( "arg_as_int64_t() - illegal cast. 
Problem space argument must be integer or scalar"); } return true; } return false; } /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_int(int &int_value, KernelArgument::Value const *value_ptr) { int64_t value64; bool obtained = arg_as_int(value64, value_ptr); if (obtained) { int_value = int(value64); return true; } return false; } /// Lexically casts an argument to an int bool arg_as_int( int &int_value, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); return arg_as_int(int_value, value_ptr); } /// Lexically casts an argument to an int64 bool arg_as_int( int64_t &int_value, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); return arg_as_int(int_value, value_ptr); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_NumericTypeID( library::NumericTypeID &numeric_type, KernelArgument::Value const *value_ptr) { if (value_ptr->not_null) { if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { numeric_type = library::from_string<library::NumericTypeID>( static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)->element); if (numeric_type == library::NumericTypeID::kInvalid) { throw std::runtime_error( "arg_as_NumericTypeID() - illegal cast."); } } else { throw std::runtime_error( "arg_as_NumericTypeID() - illegal cast."); } return true; } return false; } /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_NumericTypeID( library::NumericTypeID &numeric_type, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); return arg_as_NumericTypeID(numeric_type, value_ptr); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_RasterOrder( library::RasterOrder &raster_order, KernelArgument::Value const *value_ptr) { if (value_ptr->not_null) { if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { raster_order = library::from_string<library::RasterOrder>( static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)->element); if (raster_order == library::RasterOrder::kInvalid) { throw std::runtime_error( "arg_as_RasterOrder() - illegal cast."); } } else { throw std::runtime_error( "arg_as_RasterOrder() - illegal cast."); } return true; } return false; } /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. 
bool arg_as_RasterOrder( library::RasterOrder &raster_order, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); return arg_as_RasterOrder(raster_order, value_ptr); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_LayoutTypeID( library::LayoutTypeID &layout_type, KernelArgument::Value const *value_ptr) { if (value_ptr->not_null) { if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { layout_type = library::from_string<library::LayoutTypeID>( static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)->element); if (layout_type == library::LayoutTypeID::kInvalid) { throw std::runtime_error( "arg_as_LayoutTypeID() - illegal cast."); } } else { throw std::runtime_error( "arg_as_LayoutTypeID() - illegal cast."); } return true; } return false; } /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_LayoutTypeID( library::LayoutTypeID &layout_type, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); return arg_as_LayoutTypeID(layout_type, value_ptr); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_OpcodeClassID( library::OpcodeClassID &opcode_class, KernelArgument::Value const *value_ptr) { if (value_ptr->not_null) { if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { opcode_class = library::from_string<library::OpcodeClassID>( static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)->element); if (opcode_class == library::OpcodeClassID::kInvalid) { throw std::runtime_error( "arg_as_OpcodeClassID() - illegal cast."); } } else { throw std::runtime_error( "arg_as_OpcodeClassID() - illegal cast."); } return true; } return false; } /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_OpcodeClassID( library::OpcodeClassID &opcode_class, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); return arg_as_OpcodeClassID(opcode_class, value_ptr); } /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_SplitKModeID( library::SplitKMode &split_k_mode, KernelArgument::Value const *value_ptr) { if (value_ptr->not_null) { if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { split_k_mode = library::from_string<library::SplitKMode>( static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)->element); if (split_k_mode == library::SplitKMode::kInvalid) { throw std::runtime_error( "arg_as_SplitKModeID() - illegal cast."); } } else { throw std::runtime_error( "arg_as_SplitKModeID() - illegal cast."); } return true; } return false; } /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. 
bool arg_as_SplitKModeID( library::SplitKMode &split_k_mode, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); return arg_as_SplitKModeID(split_k_mode, value_ptr); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_ConvModeID( library::ConvModeID &conv_mode, KernelArgument::Value const *value_ptr) { if (value_ptr->not_null) { if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { conv_mode = library::from_string<library::ConvModeID>( static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)->element); if (conv_mode == library::ConvModeID::kInvalid) { throw std::runtime_error( "arg_as_ConvModeID() - illegal cast."); } } else { throw std::runtime_error( "arg_as_ConvModeID() - illegal cast."); } return true; } return false; } /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_ConvModeID( library::ConvModeID &conv_mode, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); return arg_as_ConvModeID(conv_mode, value_ptr); } /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_ProviderID( library::Provider &provider, KernelArgument::Value const *value_ptr) { if (value_ptr->not_null) { if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { provider = library::from_string<library::Provider>( static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)->element); if (provider == library::Provider::kInvalid) { throw std::runtime_error( "arg_as_ProviderID() - illegal cast."); } } else { throw std::runtime_error( "arg_as_ProviderID() - illegal cast."); } return true; } return false; } /// Lexically casts an argument to an int64 if it is defined. Returns true if not null. bool arg_as_ProviderID( library::Provider &provider, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); return arg_as_ProviderID(provider, value_ptr); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Lexically casts an argument to a given type stored in a byte array. Returns true if not null. bool arg_as_scalar( std::vector<uint8_t> &bytes, library::NumericTypeID numeric_type, KernelArgument::Value const *value_ptr) { if (value_ptr->not_null) { if (value_ptr->argument->description->type == ArgumentTypeID::kInteger) { int64_t int_value = static_cast<IntegerArgument::IntegerValue const *>(value_ptr)->value; // TODO - convert int64_t => destination type } else if (value_ptr->argument->description->type == ArgumentTypeID::kScalar) { std::string const &str_value = static_cast<ScalarArgument::ScalarValue const *>(value_ptr)->value; return lexical_cast(bytes, numeric_type, str_value); } else { throw std::runtime_error( "arg_as_int() - illegal cast. 
Problem space argument must be integer or scalar"); } return true; } return false; } /// Lexically casts an argument to a given type and returns a byte array bool arg_as_scalar( std::vector<uint8_t> &bytes, library::NumericTypeID numeric_type, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); return arg_as_scalar(bytes, numeric_type, value_ptr); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Returns true if a tensor description satisfies a `tensor` value bool tensor_description_satisfies( library::TensorDescription const &tensor_desc, TensorArgument::TensorValue const *value_ptr) { if (value_ptr->not_null) { if (value_ptr->desc.element != library::NumericTypeID::kUnknown && value_ptr->desc.element != tensor_desc.element) { return false; } if (value_ptr->desc.layout != library::LayoutTypeID::kUnknown && value_ptr->desc.layout != tensor_desc.layout) { return false; } } return true; } /// Returns true if a tensor description satisfies a `tensor` value bool tensor_description_satisfies( library::TensorDescription const &tensor_desc, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); if (value_ptr->argument->description->type == ArgumentTypeID::kTensor) { return tensor_description_satisfies( tensor_desc, static_cast<TensorArgument::TensorValue const *>(value_ptr)); } else { throw std::runtime_error("Kernel argument mismatch"); } return false; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Returns true if conv_kind satisfies the value bool conv_kind_satisfies( library::ConvKind const &conv_kind, EnumeratedTypeArgument::EnumeratedTypeValue const *value_ptr) { if (value_ptr->not_null) { library::ConvKind conv_kind_cmd_line = library::from_string<library::ConvKind>(value_ptr->element); if (conv_kind_cmd_line != library::ConvKind::kUnknown && conv_kind_cmd_line != conv_kind) { return false; } } return true; } /// Returns true if conv_kind satisfies the value bool conv_kind_satisfies( library::ConvKind const &conv_kind, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { return conv_kind_satisfies( conv_kind, static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)); } else { throw std::runtime_error("Kernel argument mismatch"); } return false; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Returns true if a iterator algorithm satisfies the value bool iterator_algorithm_satisfies( library::IteratorAlgorithmID const &iterator_algorithm, EnumeratedTypeArgument::EnumeratedTypeValue const *value_ptr) { if (value_ptr->not_null) { library::IteratorAlgorithmID iterator_algorithm_cmd_line = library::from_string<library::IteratorAlgorithmID>(value_ptr->element); if (iterator_algorithm_cmd_line != library::IteratorAlgorithmID::kNone && iterator_algorithm_cmd_line != iterator_algorithm) { return false; } } return true; } /// Returns true if a iterator algorithm satisfies the value bool 
iterator_algorithm_satisfies( library::IteratorAlgorithmID const &iterator_algorithm, char const *name, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { size_t idx = problem_space.argument_index(name); KernelArgument::Value const *value_ptr = problem.at(idx).get(); if (value_ptr->argument->description->type == ArgumentTypeID::kEnumerated) { return iterator_algorithm_satisfies( iterator_algorithm, static_cast<EnumeratedTypeArgument::EnumeratedTypeValue const *>(value_ptr)); } else { throw std::runtime_error("Kernel argument mismatch"); } return false; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
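/////////////////////////////////////////////////////////////////////////////////////////////////

// Editorial sketch (not part of CUTLASS): ProblemSpace::Iterator::operator++() above enumerates
// the Cartesian product of the per-argument value ranges with odometer-style carry logic -- the
// first argument advances, and any iterator that reaches the end of its range is reset while the
// next argument's iterator is incremented. The standalone program below reproduces that carry
// scheme with plain std::vector state; the vector-of-vectors representation and the sample values
// are illustrative assumptions, not the profiler's real polymorphic ValueIterator objects.

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  // Each inner vector plays the role of one KernelArgument's set of values.
  std::vector<std::vector<int>> ranges = {{256, 512}, {128}, {16, 32, 64}};
  std::vector<std::size_t> pos(ranges.size(), 0);   // one "iterator" per argument

  bool done = false;
  while (!done) {
    for (std::size_t i = 0; i < ranges.size(); ++i) {
      std::printf("%d ", ranges[i][pos[i]]);
    }
    std::printf("\n");

    // Carry logic: advance the first position; on overflow, reset it and carry into the next.
    std::size_t arg = 0;
    ++pos[arg];
    while (pos[arg] == ranges[arg].size()) {
      pos[arg] = 0;
      if (++arg == ranges.size()) { done = true; break; }   // carried past the last argument
      ++pos[arg];
    }
  }
  return 0;
}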
cutlass/tools/profiler/src/problem_space.cpp
/****************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ #pragma once /** * \file * \brief cuda kernels for padding in device memory with NHWC layout. 
*/ #include "cutlass/cutlass.h" #include "cutlass/layout/tensor.h" #include "cutlass/numeric_types.h" #include "cutlass/tensor_coord.h" #include "cutlass/tensor_ref.h" namespace cutlass { /** \brief interface for padding in a device memory tensor with NHWC layout * \tparam T: data type */ template <typename T> void nhwc_padding(cutlass::Tensor4DCoord input_tensor_size, cutlass::Tensor4DCoord output_tensor_size, TensorRef<T, layout::TensorNHWC> ref_input, TensorRef<T, layout::TensorNHWC> ref_output, cudaStream_t stream); template <typename T> __global__ void nhwc_padding_kernel(const int32_t n, const int32_t h, const int32_t w, const int32_t c_in, const int32_t c_out, const T zero, const T *input, T *output){ const int32_t idx_jump = blockDim.x * gridDim.x; const int32_t total_elements = n * h * w * c_out; int32_t c_idx, w_idx, h_idx, n_idx, resudial; T value; for (int32_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < total_elements; idx += idx_jump) { c_idx = idx%c_out; if (c_idx >= c_in){ value = zero; } else{ resudial = idx/c_out; w_idx = resudial%w; resudial = resudial/w; h_idx = resudial%h; n_idx = resudial/h; resudial = ((n_idx * h + h_idx) * w + w_idx) * c_in + c_idx; value = input[resudial]; } output[idx] = value; } } // fast kernel for c_in = 3 & c_out = 4 template <typename Tio, typename Telement, int element_in_Tio> __global__ void nhwc_padding_channel_3To4_kernel(const int32_t n, const int32_t h, const int32_t w, const Tio *input, Tio *output, const int32_t max_output_element, const int32_t max_input_element, const Tio zero_io, const Telement zero_element){ __shared__ Tio shm[192]; const int tidx = blockIdx.x * 192 + threadIdx.x; const int threadidx = threadIdx.x; shm[threadIdx.x] = tidx >= max_input_element ? zero_io : input[tidx]; __syncthreads(); const int output_offset = blockIdx.x * 256; const int lower_bound = max_output_element < output_offset + 256 ? max_output_element : output_offset + 256; for (int i = output_offset + threadidx, j = threadidx ; i < lower_bound ; i+=192, j+=192) { const Telement* shm_element = (const Telement*)shm + j*3*element_in_Tio/4; Telement array[element_in_Tio]; CUTLASS_PRAGMA_UNROLL for (int k = 0 ; k < element_in_Tio ; k++) array[k] = ((k+1)%4 == 0) ? zero_element : shm_element[(k > 3) ? (k - 1) : k]; output[i] = *((const Tio *)array); } } // fast kernel for c_in = 3 & c_out = 8 template <typename Tio, typename Telement, int element_in_Tio> __global__ void nhwc_padding_channel_3To8_kernel(const int32_t n, const int32_t h, const int32_t w, const Tio *input, Tio *output, const int32_t max_output_element, const int32_t max_input_element, const Tio zero_io, const Telement zero_element){ __shared__ Tio shm[192]; const int tidx = blockIdx.x * 192 + threadIdx.x; const int threadidx = threadIdx.x; shm[threadIdx.x] = tidx >= max_input_element ? zero_io : input[tidx]; __syncthreads(); const int output_offset = blockIdx.x * 512; const int lower_bound = max_output_element < output_offset + 512 ? max_output_element : output_offset + 512; for (int i = output_offset + threadidx, j = threadidx ; i < lower_bound ; i+=192, j+=192) { const Telement* shm_element = (const Telement*)shm + (element_in_Tio == 4 ? j/2 : j)*3; Telement array[element_in_Tio]; //float if (element_in_Tio == 4){ CUTLASS_PRAGMA_UNROLL for (int k = 0 ; k < element_in_Tio ; k++) array[k] = ((j % 2) == 1) ? zero_element : ((k >= 3) ? zero_element : shm_element[k]); } //half else{ CUTLASS_PRAGMA_UNROLL for (int k = 0 ; k < element_in_Tio ; k++) array[k] = (k >= 3) ? 
zero_element : shm_element[k]; } output[i] = *((const Tio *)array); } } template <typename T> void nhwc_padding(cutlass::Tensor4DCoord input_tensor_size, cutlass::Tensor4DCoord output_tensor_size, TensorRef<T, layout::TensorNHWC> ref_input, TensorRef<T, layout::TensorNHWC> ref_output, cudaStream_t stream){ assert( input_tensor_size.n() == output_tensor_size.n() && input_tensor_size.h() == output_tensor_size.h() && input_tensor_size.w() == output_tensor_size.w() && input_tensor_size.c() <= output_tensor_size.c()); int n = input_tensor_size.n(); int h = input_tensor_size.h(); int w = input_tensor_size.w(); int c_in = input_tensor_size.c(); int c_out = output_tensor_size.c(); //case 1 : channel == 3 padding to 4 or 8 if ((c_out == 4 || c_out == 8) && c_in == 3 && (n*h*w % 8 == 0)){ dim3 block(192); const int nhw = n*h*w; const int nhwc = nhw*c_in; //for half_t if (cutlass::sizeof_bits<T>::value == 16){ const int element_in_Tio = 8; const int max_input_element = nhwc/element_in_Tio; const int max_output_element = nhw*c_out/element_in_Tio; const int4 zero_io = {0, 0, 0, 0}; const half_t zero_element = static_cast<half_t>(0.0f); dim3 grid((nhwc + 192*element_in_Tio - 1)/(192*element_in_Tio)); if (c_out == 4){ nhwc_padding_channel_3To4_kernel<int4, half_t, element_in_Tio><<<grid, block, 0, stream>>> (n, h, w, (const int4 *)ref_input.data(), (int4 *)ref_output.data(), max_output_element, max_input_element, zero_io, zero_element); } else if (c_out == 8){ nhwc_padding_channel_3To8_kernel<int4, half_t, element_in_Tio><<<grid, block, 0, stream>>> (n, h, w, (const int4 *)ref_input.data(), (int4 *)ref_output.data(), max_output_element, max_input_element, zero_io, zero_element); } } //for float else{ const int element_in_Tio = 4; const int max_input_element = nhwc/element_in_Tio; const int max_output_element = nhw*c_out/element_in_Tio; const float4 zero_io = {0.0f, 0.0f, 0.0f, 0.0f}; const float zero_element = 0.0f; dim3 grid((nhwc + 192*element_in_Tio - 1)/(192*element_in_Tio)); if (c_out == 4){ nhwc_padding_channel_3To4_kernel<float4, float, element_in_Tio><<<grid, block, 0, stream>>> (n, h, w, (const float4 *)ref_input.data(), (float4 *)ref_output.data(), max_output_element, max_input_element, zero_io, zero_element); } else if (c_out == 8){ nhwc_padding_channel_3To8_kernel<float4, float, element_in_Tio><<<grid, block, 0, stream>>> (n, h, w, (const float4 *)ref_input.data(), (float4 *)ref_output.data(), max_output_element, max_input_element, zero_io, zero_element); } } } //case 2 : even channel else if ((c_out % 2) == 0 && (c_in % 2) == 0){ int32_t total_elements = n * h * w * c_out / 2; int block_size = 256; dim3 grid((total_elements + 255)/256); dim3 block(block_size); //for half_t if (cutlass::sizeof_bits<T>::value == 16){ const __half2 zero = {0.0f, 0.0f}; nhwc_padding_kernel<<<grid, block, 0, stream>>>(n, h, w, c_in/2, c_out/2, zero, (const __half2*)ref_input.data(), (__half2*)ref_output.data()); } //for float else{ const float2 zero = {0.0f, 0.0f}; nhwc_padding_kernel<<<grid, block, 0, stream>>>(n, h, w, c_in/2, c_out/2, zero, (const float2*)ref_input.data(), (float2*)ref_output.data()); } } //case 3 : odd channel else{ int32_t total_elements = n * h * w * c_out; int block_size = 256; dim3 grid((total_elements + 255)/256); dim3 block(block_size); const T zero = static_cast<T>(0.0f); nhwc_padding_kernel<<<grid, block, 0, stream>>>(n, h, w, c_in, c_out, zero, ref_input.data(), ref_output.data()); } } } //namespace cutlass
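/////////////////////////////////////////////////////////////////////////////////////////////////

// Editorial sketch (not part of CUTLASS): a host-side reference for the index mapping that the
// generic nhwc_padding_kernel above performs. Every output element at
// ((n*H + h)*W + w)*C_out + c copies the input element at ((n*H + h)*W + w)*C_in + c when
// c < C_in and is zero-filled otherwise. The function name and the float element type are
// illustrative assumptions; the device kernels template the element type.

#include <cstddef>
#include <vector>

static void nhwc_pad_reference(int N, int H, int W, int C_in, int C_out,
                               std::vector<float> const &input,
                               std::vector<float> &output) {
  // Channels in [C_in, C_out) stay zero -- that is the padding.
  output.assign(static_cast<std::size_t>(N) * H * W * C_out, 0.0f);

  for (int n = 0; n < N; ++n) {
    for (int h = 0; h < H; ++h) {
      for (int w = 0; w < W; ++w) {
        for (int c = 0; c < C_in; ++c) {
          std::size_t in_idx  = ((static_cast<std::size_t>(n) * H + h) * W + w) * C_in  + c;
          std::size_t out_idx = ((static_cast<std::size_t>(n) * H + h) * W + w) * C_out + c;
          output[out_idx] = input[in_idx];
        }
      }
    }
  }
}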
cutlass/tools/util/include/cutlass/util/device_nhwc_padding.h
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Reference implementation for GEMM in host-side code. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" namespace cutlass { namespace reference { namespace detail { //////////////////////////////////////////////////////////////////////////////////////////////////// /// Template function to compute an inner product. 
#pragma hd_warning_disable // Suppresses warnings when attempting to instantiate with a // host-only type template <typename Atype, typename Btype, typename Ctype> CUTLASS_HOST_DEVICE Ctype inner_product(Atype a, Btype b, Ctype c) { return Ctype(a) * Ctype(b) + c; } /// Specialization for matrix multiplication with binary operands template <> CUTLASS_HOST_DEVICE int inner_product<Array<bin1_t, 32>, Array<bin1_t, 32>, int>( Array<bin1_t, 32> a, Array<bin1_t, 32> b, int c) { int accum = 0; for (int bit = 0; bit < 32; bit++) { accum += a[bit] ^ b[bit]; } return accum + c; } /* /// Specialization for matrix multiplication with signed 4-bit integer operands template <> CUTLASS_HOST_DEVICE int inner_product<Array<int4b_t, 8>, Array<int4b_t, 8>, int>( Array<int4b_t, 8> a, Array<int4b_t, 8> b, int c) { int accum = 0; for (int k = 0; k < 8; k++) { accum += a[k] * b[k]; } return accum + c; } /// Specialization for matrix multiplication with unsigned 4-bit integer operands template <> CUTLASS_HOST_DEVICE int inner_product<Array<uint4b_t, 8>, Array<uint4b_t, 8>, int>( Array<uint4b_t, 8> a, Array<uint4b_t, 8> b, int c) { int accum = 0; for (int k = 0; k < 8; k++) { accum += a[k] * b[k]; } return accum + c; } */ //////////////////////////////////////////////////////////////////////////////////////////////////// template <typename SrcType, typename DstType> struct Cast { // Default behavior: convert to the destination type #pragma hd_warning_disable // Suppresses warnings when attempting to instantiate complex<T> with a // host-only type CUTLASS_HOST_DEVICE static DstType apply(SrcType src) { return static_cast<DstType>(src); }; }; template <> struct Cast<float, int8_t> { CUTLASS_HOST_DEVICE static int8_t apply(float src) { // Clamp to the range of signed 8-bit integers. return static_cast<int8_t>(fmaxf(-128.f, fminf(127.f, src))); }; }; template <> struct Cast<float, uint8_t> { CUTLASS_HOST_DEVICE static uint8_t apply(float src) { // Clamp to the range of signed 8-bit integers. return static_cast<uint8_t>(fmaxf(0.f, fminf(255.f, src))); }; }; //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace detail } // namespace reference } // namespace cutlass
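/////////////////////////////////////////////////////////////////////////////////////////////////

// Editorial sketch (not part of CUTLASS): the Cast<float, int8_t> and Cast<float, uint8_t>
// specializations above clamp before converting, so out-of-range inputs saturate instead of
// wrapping. The tiny host program below applies the same clamp formula with standard <cmath>
// functions; it is an illustration of the behavior, not the library's converter.

#include <cmath>
#include <cstdint>
#include <cstdio>

static int8_t saturate_to_int8(float src) {
  // Mirrors Cast<float, int8_t>::apply(): clamp to [-128, 127], then convert.
  return static_cast<int8_t>(std::fmaxf(-128.f, std::fminf(127.f, src)));
}

int main() {
  std::printf("%d %d %d\n",
              int(saturate_to_int8(300.f)),     // saturates to 127
              int(saturate_to_int8(-1000.f)),   // saturates to -128
              int(saturate_to_int8(42.f)));     // in range, stays 42
  return 0;
}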
cutlass/tools/util/include/cutlass/util/reference/detail/inner_product.h
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Reference implementation for GEMM in host-side code. */ #pragma once #include "cutlass/coord.h" #include "cutlass/tensor_view.h" #include "cutlass/gemm/gemm.h" namespace cutlass { namespace reference { namespace device { namespace thread { //////////////////////////////////////////////////////////////////////////////////////////////////// /// Thread-level blocked general matrix product. // // Note, this is a reference implementation. Performance is not expected to approach peak. 
// template < typename TensorRefA, typename TensorRefB, typename TensorRefC, typename ScalarType, typename AccumulatorType, typename OutputTile, typename InnerProductOp = multiply_add<AccumulatorType>, typename ConvertOp = NumericConverter<typename TensorRefC::Element, ScalarType> > struct Gemm { using ElementA = typename TensorRefA::Element; using ElementB = typename TensorRefB::Element; using ElementC = typename TensorRefC::Element; // // Data members // /// Tile for A operand ElementA A_tile[OutputTile::kColumn]; /// Tile for B operand ElementB B_tile[OutputTile::kRow]; /// Tile for Accumulator AccumulatorType accum[OutputTile::kColumn][OutputTile::kRow]; // // Methods // /// Constructor CUTLASS_HOST_DEVICE Gemm(AccumulatorType initial_accum = AccumulatorType(0)) { // Clear fetch registers for (int i = 0; i < OutputTile::kColumn; ++i) { A_tile[i] = ElementA(0); } for (int j = 0; j < OutputTile::kRow; ++j) { B_tile[j] = ElementB(0); } // Clear accumulators CUTLASS_PRAGMA_UNROLL for (int j = 0; j < OutputTile::kColumn; ++j) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < OutputTile::kRow; ++i) { accum[j][i] = initial_accum; } } } /// Computes a matrix product CUTLASS_HOST_DEVICE Gemm & multiply_add( gemm::GemmCoord problem_size, TensorRefA tensor_a, TensorRefB tensor_b, MatrixCoord output_coord = MatrixCoord()) { InnerProductOp inner_product_op; // Loop over the GEMM K dimension CUTLASS_PRAGMA_NO_UNROLL for (int k = 0; k < problem_size.k(); ++k) { // Fetch a slice of the A matrix CUTLASS_PRAGMA_UNROLL for (int i = 0; i < OutputTile::kColumn; ++i) { if (output_coord.row() + i < problem_size.m()) { A_tile[i] = tensor_a.at(make_Coord(output_coord.row() + i, k)); } } // Fetch a slice of the B matrix CUTLASS_PRAGMA_UNROLL for (int j = 0; j < OutputTile::kRow; ++j) { if (output_coord.column() + j < problem_size.n()) { B_tile[j] = tensor_b.at(make_Coord(k, output_coord.column() + j)); } } // Compute an accumulated matrix product CUTLASS_PRAGMA_UNROLL for (int j = 0; j < OutputTile::kRow; ++j) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < OutputTile::kColumn; ++i) { accum[j][i] = inner_product_op(A_tile[i], B_tile[j], accum[j][i]); } } } return *this; } /// Performs linear scaling of matrix product and updates output tensor CUTLASS_HOST_DEVICE Gemm & epilogue( gemm::GemmCoord problem_size, ScalarType alpha, ScalarType beta, TensorRefC tensor_c, TensorRefC tensor_d, MatrixCoord output_coord = MatrixCoord()) { ConvertOp convert_op; // Update the output tensor for (int j = 0; j < OutputTile::kRow; ++j) { for (int i = 0; i < OutputTile::kColumn; ++i) { MatrixCoord coord = output_coord + MatrixCoord(i, j); if (coord.row() < problem_size.m() && coord.column() < problem_size.n()) { tensor_d.at(coord) = convert_op( alpha * ScalarType(accum[j][i]) + beta * ScalarType(tensor_c.at(coord)) ); } } } return *this; } }; //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace device } // namespace reference } // namespace cutlass
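/////////////////////////////////////////////////////////////////////////////////////////////////

// Editorial sketch (not part of CUTLASS): once multiply_add() and epilogue() above have both run,
// the thread-level reference computes D = alpha * (A * B) + beta * C over its output tile. The
// standalone host routine below performs the same computation for whole matrices with row-major
// float storage; the storage layout and element type are illustrative assumptions that the real
// class leaves to its template parameters.

#include <cstddef>
#include <vector>

static void reference_gemm(int M, int N, int K, float alpha,
                           std::vector<float> const &A,   // M x K, row-major
                           std::vector<float> const &B,   // K x N, row-major
                           float beta,
                           std::vector<float> const &C,   // M x N, row-major
                           std::vector<float> &D) {       // M x N, row-major
  D.resize(static_cast<std::size_t>(M) * N);
  for (int m = 0; m < M; ++m) {
    for (int n = 0; n < N; ++n) {
      float accum = 0.0f;
      for (int k = 0; k < K; ++k) {
        accum += A[static_cast<std::size_t>(m) * K + k] *
                 B[static_cast<std::size_t>(k) * N + n];
      }
      D[static_cast<std::size_t>(m) * N + n] =
          alpha * accum + beta * C[static_cast<std::size_t>(m) * N + n];
    }
  }
}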
cutlass/tools/util/include/cutlass/util/reference/device/thread/gemm.h
var searchData= [ ['kind',['Kind',['../structcutlass_1_1Distribution.html#a499f4023e0d42356ce71d38cc32bf92a',1,'cutlass::Distribution']]] ];
cutlass/docs/search/enums_3.js
var searchData= [ ['debug_2eh',['debug.h',['../tools_2util_2include_2cutlass_2util_2debug_8h.html',1,'']]], ['gemm_2eh',['gemm.h',['../tools_2util_2include_2cutlass_2util_2reference_2device_2gemm_8h.html',1,'']]], ['gemm_2eh',['gemm.h',['../tools_2util_2include_2cutlass_2util_2reference_2device_2thread_2gemm_8h.html',1,'']]], ['gemm_2eh',['gemm.h',['../tools_2util_2include_2cutlass_2util_2reference_2host_2gemm_8h.html',1,'']]], ['gemm_2eh',['gemm.h',['../tools_2util_2include_2cutlass_2util_2reference_2device_2kernel_2gemm_8h.html',1,'']]], ['gemm_5fcomplex_2eh',['gemm_complex.h',['../tools_2util_2include_2cutlass_2util_2reference_2host_2gemm__complex_8h.html',1,'']]], ['matrix_2eh',['matrix.h',['../thread_2matrix_8h.html',1,'']]], ['predicated_5ftile_5fiterator_2eh',['predicated_tile_iterator.h',['../transform_2threadblock_2predicated__tile__iterator_8h.html',1,'']]], ['tensor_2eh',['tensor.h',['../tensor_8h.html',1,'']]], ['tensor_5fcoord_2eh',['tensor_coord.h',['../tensor__coord_8h.html',1,'']]], ['tensor_5fcopy_2eh',['tensor_copy.h',['../tensor__copy_8h.html',1,'']]], ['tensor_5fnorm_2eh',['tensor_norm.h',['../tensor__norm_8h.html',1,'']]], ['tensor_5fop_5fmultiplicand_5fsm70_2eh',['tensor_op_multiplicand_sm70.h',['../tensor__op__multiplicand__sm70_8h.html',1,'']]], ['tensor_5fop_5fmultiplicand_5fsm75_2eh',['tensor_op_multiplicand_sm75.h',['../tensor__op__multiplicand__sm75_8h.html',1,'']]], ['tensor_5fop_5fpolicy_2eh',['tensor_op_policy.h',['../tensor__op__policy_8h.html',1,'']]], ['tensor_5fref_2eh',['tensor_ref.h',['../tensor__ref_8h.html',1,'']]], ['tensor_5fview_2eh',['tensor_view.h',['../tensor__view_8h.html',1,'']]], ['tensor_5fview_5fio_2eh',['tensor_view_io.h',['../tensor__view__io_8h.html',1,'']]], ['tile_5fiterator_5fsimt_2eh',['tile_iterator_simt.h',['../tile__iterator__simt_8h.html',1,'']]], ['tile_5fiterator_5ftensor_5fop_2eh',['tile_iterator_tensor_op.h',['../tile__iterator__tensor__op_8h.html',1,'']]], ['tile_5fiterator_5fvolta_5ftensor_5fop_2eh',['tile_iterator_volta_tensor_op.h',['../tile__iterator__volta__tensor__op_8h.html',1,'']]], ['tile_5fiterator_5fwmma_5ftensor_5fop_2eh',['tile_iterator_wmma_tensor_op.h',['../tile__iterator__wmma__tensor__op_8h.html',1,'']]], ['transpose_2eh',['transpose.h',['../transpose_8h.html',1,'']]], ['type_5ftraits_2eh',['type_traits.h',['../type__traits_8h.html',1,'']]] ];
cutlass/docs/search/files_11.js
var searchData= [ ['half_2eh',['half.h',['../half_8h.html',1,'']]], ['host_5freorder_2eh',['host_reorder.h',['../host__reorder_8h.html',1,'']]], ['host_5ftensor_2eh',['host_tensor.h',['../host__tensor_8h.html',1,'']]], ['tensor_5fcompare_2eh',['tensor_compare.h',['../host_2tensor__compare_8h.html',1,'']]], ['tensor_5felementwise_2eh',['tensor_elementwise.h',['../host_2tensor__elementwise_8h.html',1,'']]], ['tensor_5ffill_2eh',['tensor_fill.h',['../host_2tensor__fill_8h.html',1,'']]], ['tensor_5fforeach_2eh',['tensor_foreach.h',['../host_2tensor__foreach_8h.html',1,'']]] ];
cutlass/docs/search/files_7.js
var searchData= [ ['real_2eh',['real.h',['../real_8h.html',1,'']]], ['reduce_2eh',['reduce.h',['../reduce_8h.html',1,'']]], ['reduce_5fsplit_5fk_2eh',['reduce_split_k.h',['../reduce__split__k_8h.html',1,'']]], ['reduction_5fop_2eh',['reduction_op.h',['../reduction__op_8h.html',1,'']]], ['reduction_5foperators_2eh',['reduction_operators.h',['../reduction__operators_8h.html',1,'']]], ['regular_5ftile_5faccess_5fiterator_2eh',['regular_tile_access_iterator.h',['../regular__tile__access__iterator_8h.html',1,'']]], ['regular_5ftile_5faccess_5fiterator_5fpitch_5flinear_2eh',['regular_tile_access_iterator_pitch_linear.h',['../regular__tile__access__iterator__pitch__linear_8h.html',1,'']]], ['regular_5ftile_5faccess_5fiterator_5ftensor_5fop_2eh',['regular_tile_access_iterator_tensor_op.h',['../regular__tile__access__iterator__tensor__op_8h.html',1,'']]], ['regular_5ftile_5fiterator_2eh',['regular_tile_iterator.h',['../regular__tile__iterator_8h.html',1,'']]], ['regular_5ftile_5fiterator_5fpitch_5flinear_2eh',['regular_tile_iterator_pitch_linear.h',['../regular__tile__iterator__pitch__linear_8h.html',1,'']]], ['regular_5ftile_5fiterator_5fpitch_5flinear_5f2dthreadtile_2eh',['regular_tile_iterator_pitch_linear_2dthreadtile.h',['../regular__tile__iterator__pitch__linear__2dthreadtile_8h.html',1,'']]], ['regular_5ftile_5fiterator_5ftensor_5fop_2eh',['regular_tile_iterator_tensor_op.h',['../regular__tile__iterator__tensor__op_8h.html',1,'']]], ['regular_5ftile_5fiterator_5ftensor_5fop_5fsm70_2eh',['regular_tile_iterator_tensor_op_sm70.h',['../regular__tile__iterator__tensor__op__sm70_8h.html',1,'']]], ['relatively_5fequal_2eh',['relatively_equal.h',['../relatively__equal_8h.html',1,'']]], ['threadblock_5fswizzle_2eh',['threadblock_swizzle.h',['../reduction_2threadblock__swizzle_8h.html',1,'']]] ];
cutlass/docs/search/files_f.js
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief CUTLASS layout visualization example */ #include <map> #include <memory> #include "cutlass/layout/matrix.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor_op_multiplicand_sm70.h" #include "cutlass/layout/tensor_op_multiplicand_sm75.h" #include "cutlass/layout/tensor_op_multiplicand_sm80.h" #include "visualize_layout.h" #include "register_layout.h" ///////////////////////////////////////////////////////////////////////////////////////////////// void RegisterLayouts(std::map<std::string, std::unique_ptr<VisualizeLayoutBase> > &layouts) { struct { char const *name; VisualizeLayoutBase *ptr; } layout_pairs[] = { {"PitchLinear", new VisualizeLayout<cutlass::layout::PitchLinear>}, {"ColumnMajor", new VisualizeLayout<cutlass::layout::ColumnMajor>}, {"RowMajor", new VisualizeLayout<cutlass::layout::RowMajor>}, {"ColumnMajorInterleaved<4>", new VisualizeLayout<cutlass::layout::ColumnMajorInterleaved<4>>}, {"RowMajorInterleaved<4>", new VisualizeLayout<cutlass::layout::RowMajorInterleaved<4>>}, // All Ampere/Turing H/Integer matrix multiply tensor core kernels uses the same swizzling // layout implementation with different templates. 
// // mma.sync.aligned.m8n8k128.s32.b1.b1.s32 Interleaved-256 // mma.sync.aligned.m16n8k256.s32.b1.b1.s32 Interleaved-256 {"TensorOpMultiplicand<1,256>", new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<1, 256>>}, // mma.sync.aligned.m8n8k128.s32.b1.b1.s32 TN kblock512 // mma.sync.aligned.m16n8k256.s32.b1.b1.s32 TN kblock512 {"TensorOpMultiplicand<1,512>", new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<1, 512>>}, // mma.sync.aligned.m16n8k256.s32.b1.b1.s32 TN kblock1024 {"TensorOpMultiplicand<1,1024>", new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<1, 1024>>}, // Integer matrix multiply.int4 8832 Interleaved-64 // Integer matrix multiply.int4 16864 Interleaved-64 {"TensorOpMultiplicand<4,64>", new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<4, 64>>}, // Integer matrix multiply.int4 8832 TN kblock128 // Integer matrix multiply.int4 16864 TN kblock128 {"TensorOpMultiplicand<4,128>", new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<4, 128>>}, // Integer matrix multiply.int4 16864 TN kblock256 {"TensorOpMultiplicand<4,256>", new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<4, 256>>}, // Integer matrix multiply 8816 Interleaved-32 // Integer matrix multiply 16832 Interleaved-32 {"TensorOpMultiplicand<8,32>", new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<8, 32>>}, // Integer matrix multiply 8816 TN kblock64 // Integer matrix multiply 16832 TN kblock64 {"TensorOpMultiplicand<8,64>", new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<8, 64>>}, // Integer matrix multiply 16832 TN kblock128 {"TensorOpMultiplicand<8,128>", new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<8, 128>>}, // Matrix Multiply 1688 TN kblock32 // Matrix multiply 16816 TN kblock32 {"TensorOpMultiplicand<16,32>", new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<16, 32>>}, // Matrix multiply 1688 NT // Matrix multiply 16816 NT // Matrix multiply 16816 TN kblock64 {"TensorOpMultiplicand<16,64>", new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<16, 64>>}, // Matrix multiply 1688.TF32 TN kblock16 {"TensorOpMultiplicand<32,16>", new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<32, 16>>}, // Matrix multiply 1688.TF32 TN kblock32 {"TensorOpMultiplicand<32,32>", new VisualizeLayout<cutlass::layout::TensorOpMultiplicand<32, 32>>}, // Matrix multiply 1688 NT {"TensorOpMultiplicandCongruous<32,32>", new VisualizeLayout< cutlass::layout::TensorOpMultiplicandCongruous<32, 32>>}, // Matrix multiply 884 NT {"TensorOpMultiplicandCongruous<64,16>", new VisualizeLayout< cutlass::layout::TensorOpMultiplicandCongruous<64, 16>>}, // Matrix multiply 884 TN {"TensorOpMultiplicand64bCrosswise", new VisualizeLayout<cutlass::layout::TensorOpMultiplicand64bCrosswise>}, {"TensorOpMultiplicandCongruous<128,4>", new VisualizeLayout< cutlass::layout::TensorOpMultiplicandCongruous<128, 4>>}, {"TensorOpMultiplicandCrosswise<128,4>", new VisualizeLayout< cutlass::layout::TensorOpMultiplicandCrosswise<128, 4>>}, {"VoltaTensorOpMultiplicandCongruous<16>", new VisualizeLayout< cutlass::layout::VoltaTensorOpMultiplicandCongruous<16>>}, {"VoltaTensorOpMultiplicandCrosswise<16,32>", new VisualizeLayout< cutlass::layout::VoltaTensorOpMultiplicandCrosswise<16, 32>>} }; for (auto layout : layout_pairs) { layouts.emplace(std::string(layout.name), std::unique_ptr<VisualizeLayoutBase>(layout.ptr)); } } /////////////////////////////////////////////////////////////////////////////////////////////////
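/////////////////////////////////////////////////////////////////////////////////////////////////

// Editorial sketch (not part of CUTLASS): RegisterLayouts() above fills a name-to-object registry
// (std::map<std::string, std::unique_ptr<VisualizeLayoutBase>>) that a command-line driver can
// query by layout name. The standalone program below shows the same registration-and-lookup
// pattern with a hypothetical Base interface of its own; Base, describe(), and the demo layout
// names are illustrative assumptions, since VisualizeLayoutBase's real interface is defined in
// visualize_layout.h and is not reproduced here.

#include <cstdio>
#include <map>
#include <memory>
#include <string>

struct Base {
  virtual ~Base() {}
  virtual void describe() const = 0;
};

struct RowMajorDemo : Base {
  void describe() const override { std::printf("row-major demo layout\n"); }
};

struct ColumnMajorDemo : Base {
  void describe() const override { std::printf("column-major demo layout\n"); }
};

int main() {
  std::map<std::string, std::unique_ptr<Base>> registry;

  // Same shape as RegisterLayouts(): populate the map from (name, pointer) pairs.
  registry.emplace("RowMajor", std::unique_ptr<Base>(new RowMajorDemo));
  registry.emplace("ColumnMajor", std::unique_ptr<Base>(new ColumnMajorDemo));

  // Look up by a user-supplied name, as a command-line driver would.
  auto it = registry.find("RowMajor");
  if (it != registry.end()) {
    it->second->describe();
  }
  return 0;
}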
cutlass/examples/03_visualize_layout/register_layout.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a pipelined GEMM kernel. Does not compute batching or support split-K. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_coord.h" #include "cutlass/semaphore.h" #include "kernel/b2b_gemm_grouped_problem_visitor.h" #include "threadblock/grouped_threadblock_swizzle.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { namespace detail { /// Utility struct for returning the type of the problem visitor used by the swizzling function, /// if it is a grouped swizzling function, or a default visitor. This is used only for defining /// the parameters of the problem visitor used in GroupedParams. template < typename B2bMma_, typename ThreadblockSwizzle_, typename Enable = void > struct ProblemVisitorOrDefault; /// Return a generic problem visitor for GEMM problems template < typename B2bMma_, typename ThreadblockSwizzle_ > struct ProblemVisitorOrDefault<B2bMma_, ThreadblockSwizzle_, typename platform::enable_if< ! 
cutlass::gemm::threadblock::detail::IsGroupedSwizzle<ThreadblockSwizzle_>::value >::type> { using value = B2bGemmGroupedProblemVisitor<typename B2bMma_::Shape, GroupScheduleMode::kDeviceOnly, 128, 128, platform::is_same<typename B2bMma_::LayoutC, cutlass::layout::ColumnMajor>::value>; }; /// Return the problem visitor specified by the swizzling function template < typename B2bMma_, typename ThreadblockSwizzle_ > struct ProblemVisitorOrDefault<B2bMma_, ThreadblockSwizzle_, typename platform::enable_if< cutlass::gemm::threadblock::detail::IsGroupedSwizzle<ThreadblockSwizzle_>::value >::type> { using value = typename ThreadblockSwizzle_::ProblemVisitor; }; } // namespace detail ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename B2bMma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename ThreadblockSwizzle_ ///! Threadblock swizzling function > struct B2bGemm { using B2bMma = B2bMma_; using Epilogue = Epilogue_; using OutputOp0 = typename B2bMma::OutputOp; using OutputOp1 = typename Epilogue::OutputOp; using ThreadblockSwizzle = ThreadblockSwizzle_; using ElementA0 = typename B2bMma::IteratorA0::Element; using LayoutA0 = typename B2bMma::IteratorA0::Layout; using ElementB0 = typename B2bMma::IteratorB0::Element; using LayoutB0 = typename B2bMma::IteratorB0::Layout; using ElementB1 = typename B2bMma::IteratorB1::Element; using LayoutB1 = typename B2bMma::IteratorB1::Layout; using ElementC = typename Epilogue::OutputTileIterator::Element; using LayoutC = typename Epilogue::OutputTileIterator::Layout; using ScaleBiasData = typename B2bMma::IteratorAccumulatorScaleBias::Element; /// Data types needed for higher-level containers. In some cases, a single type must be exposed /// despite the B2b GEMM using two GEMMs under the hood. 
In such cases, we select the values from /// the second GEMM (other than for ElementA/ElementB) using ElementA = typename B2bMma::IteratorA0::Element; using LayoutA = typename B2bMma::IteratorA0::Layout; using ElementB = typename B2bMma::IteratorB0::Element; using LayoutB = typename B2bMma::IteratorB0::Layout; static ComplexTransform const kTransformA = B2bMma::kTransformA; static ComplexTransform const kTransformB = B2bMma::kTransformB; using Operator = typename B2bMma::Operator0; using OperatorClass = typename Operator::OperatorClass; using ThreadblockShape = typename B2bMma::Shape0; using WarpShape = typename Operator::Shape; using InstructionShape = typename Operator::InstructionShape; using ArchTag = typename B2bMma::ArchTag; static int const kStages = B2bMma::kStages; static int const kAlignmentA = B2bMma::IteratorA::AccessType::kElements; static int const kAlignmentB = B2bMma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; using Mma = B2bMma; using EpilogueOutputOp = OutputOp1; /// Warp count (concept: GemmShape) using WarpCount0 = typename B2bMma::WarpCount0; static int const kThreadCount = 32 * WarpCount0::kCount; /// Argument structure struct Arguments { // // Data members // GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm; GemmCoord problem_size_0{0,0,0}; GemmCoord problem_size_1{0,0,0}; typename B2bMma::IteratorA0::TensorRef ref_A0{}; typename B2bMma::IteratorB0::TensorRef ref_B0{}; typename Epilogue::OutputTileIterator::TensorRef ref_C0{}; typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Scale0{}; typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Bias0{}; typename B2bMma::IteratorB1::TensorRef ref_B1{}; typename Epilogue::OutputTileIterator::TensorRef ref_C1{}; typename Epilogue::OutputTileIterator::TensorRef ref_D1{}; int64_t batch_stride_A0{0}; int64_t batch_stride_B0{0}; int64_t batch_stride_B1{0}; int64_t batch_stride_C1{0}; int64_t batch_stride_D1{0}; int64_t batch_stride_Bias0{0}; int64_t batch_stride_Scale0{0}; typename OutputOp0::Params epilogue0 {}; typename OutputOp1::Params epilogue1 {}; int batch_count{1}; // // Methods // /// Default ctor Arguments() = default; /// Constructs an Arguments structure CUTLASS_HOST_DEVICE Arguments( GemmUniversalMode mode_, GemmCoord problem_size_0_, GemmCoord problem_size_1_, typename B2bMma::IteratorA0::TensorRef ref_A0_, typename B2bMma::IteratorB0::TensorRef ref_B0_, typename Epilogue::OutputTileIterator::TensorRef ref_C0_, typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Scale0_, typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Bias0_, typename B2bMma::IteratorB1::TensorRef ref_B1_, typename Epilogue::OutputTileIterator::TensorRef ref_C1_, typename Epilogue::OutputTileIterator::TensorRef ref_D1_, int64_t batch_stride_A0_, int64_t batch_stride_B0_, int64_t batch_stride_B1_, int64_t batch_stride_C1_, int64_t batch_stride_D1_, int64_t batch_stride_Bias0_, int64_t batch_stride_Scale0_, typename OutputOp0::Params epilogue0_ = typename OutputOp0::Params(), typename OutputOp1::Params epilogue1_ = typename OutputOp1::Params(), int batch_count_ = 1 ): mode(mode_), problem_size_0(problem_size_0_), problem_size_1(problem_size_1_), ref_A0(ref_A0_), ref_B0(ref_B0_), ref_C0(ref_C0_), ref_Scale0(ref_Scale0_), ref_Bias0(ref_Bias0_), ref_B1(ref_B1_), ref_C1(ref_C1_), ref_D1(ref_D1_), batch_stride_A0(batch_stride_A0_), batch_stride_B0(batch_stride_B0_), batch_stride_B1(batch_stride_B1_), 
batch_stride_C1(batch_stride_C1_), batch_stride_D1(batch_stride_D1_), batch_stride_Bias0(batch_stride_Bias0_), batch_stride_Scale0(batch_stride_Scale0_), epilogue0(epilogue0_), epilogue1(epilogue1_), batch_count(batch_count_) { } }; // Arguments structure for grouped B2B problems struct GroupedArguments { GemmCoord* problem_size_0; GemmCoord* problem_size_1; typename B2bMma::IteratorA0::TensorRef* ref_A0; typename B2bMma::IteratorB0::TensorRef* ref_B0; typename Epilogue::OutputTileIterator::TensorRef* ref_C0; typename B2bMma::IteratorAccumulatorScaleBias::TensorRef* ref_Scale0; typename B2bMma::IteratorAccumulatorScaleBias::TensorRef* ref_Bias0; typename B2bMma::IteratorB1::TensorRef* ref_B1; typename Epilogue::OutputTileIterator::TensorRef* ref_C1; typename Epilogue::OutputTileIterator::TensorRef* ref_D1; // Epilogue params remain constant across all problmes in the group. Thus, // the parameter here is not a pointer. typename OutputOp0::Params epilogue0; typename OutputOp1::Params epilogue1; int problem_count; int threadblock_count; GemmCoord* host_problem_sizes; CUTLASS_HOST_DEVICE GroupedArguments( int problem_count, GemmCoord* problem_size_0_, GemmCoord* problem_size_1_, typename B2bMma::IteratorA0::TensorRef* ref_A0_, typename B2bMma::IteratorB0::TensorRef* ref_B0_, typename Epilogue::OutputTileIterator::TensorRef* ref_C0_, typename B2bMma::IteratorAccumulatorScaleBias::TensorRef* ref_Scale0_, typename B2bMma::IteratorAccumulatorScaleBias::TensorRef* ref_Bias0_, typename B2bMma::IteratorB1::TensorRef* ref_B1_, typename Epilogue::OutputTileIterator::TensorRef* ref_C1_, typename Epilogue::OutputTileIterator::TensorRef* ref_D1_, typename OutputOp0::Params epilogue0_ = typename OutputOp0::Params(), typename OutputOp1::Params epilogue1_ = typename OutputOp1::Params(), int threadblock_count = 0 ) : problem_size_0(problem_size_0_), problem_size_1(problem_size_1_), ref_A0(ref_A0_), ref_B0(ref_B0_), ref_C0(ref_C0_), ref_Scale0(ref_Scale0_), ref_Bias0(ref_Bias0_), ref_B1(ref_B1_), ref_C1(ref_C1_), ref_D1(ref_D1_), epilogue0(epilogue0_), epilogue1(epilogue1_), problem_count(problem_count), threadblock_count(threadblock_count) {} }; /// Parameters structure struct Params { cutlass::gemm::GemmUniversalMode mode = cutlass::gemm::GemmUniversalMode::kGemm; cutlass::gemm::GemmCoord problem_size_0{}; cutlass::gemm::GemmCoord problem_size_1{}; cutlass::gemm::GemmCoord grid_tiled_shape{}; int swizzle_log_tile{0}; typename B2bMma::IteratorA0::Params params_A0{}; typename B2bMma::IteratorA0::TensorRef ref_A0{}; typename B2bMma::IteratorB0::Params params_B0{}; typename B2bMma::IteratorB0::TensorRef ref_B0{}; typename Epilogue::OutputTileIterator::Params params_C0{}; typename Epilogue::OutputTileIterator::TensorRef ref_C0{}; typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Scale0{}; typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Bias0{}; typename B2bMma::IteratorB1::Params params_B1{}; typename B2bMma::IteratorB1::TensorRef ref_B1{}; typename Epilogue::OutputTileIterator::Params params_C1{}; typename Epilogue::OutputTileIterator::TensorRef ref_C1{}; typename Epilogue::OutputTileIterator::Params params_D1{}; typename Epilogue::OutputTileIterator::TensorRef ref_D1{}; typename OutputOp0::Params output_op_0{}; typename OutputOp1::Params output_op_1{}; int64_t batch_stride_A0{0}; int64_t batch_stride_B0{0}; int64_t batch_stride_B1{0}; int64_t batch_stride_C1{0}; int64_t batch_stride_D1{0}; int64_t batch_stride_Bias0{0}; int64_t batch_stride_Scale0{0}; int *semaphore = nullptr; int 
gemm_k_iterations_0{0}; int gemm_k_size_0{0}; int gemm_k_iterations_1{0}; int gemm_k_size_1{0}; // // Methods // Params() = default; CUTLASS_HOST_DEVICE Params( cutlass::gemm::GemmUniversalMode mode, cutlass::gemm::GemmCoord const & problem_size_0, cutlass::gemm::GemmCoord const & problem_size_1, cutlass::gemm::GemmCoord const & grid_tiled_shape, typename B2bMma::IteratorA0::TensorRef ref_A0, typename B2bMma::IteratorB0::TensorRef ref_B0, typename Epilogue::OutputTileIterator::TensorRef ref_C0, typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Scale0, typename B2bMma::IteratorAccumulatorScaleBias::TensorRef ref_Bias0, typename B2bMma::IteratorB1::TensorRef ref_B1, typename Epilogue::OutputTileIterator::TensorRef ref_C1, typename Epilogue::OutputTileIterator::TensorRef ref_D1, int64_t batch_stride_A0, int64_t batch_stride_B0, int64_t batch_stride_B1, int64_t batch_stride_C1, int64_t batch_stride_D1, int64_t batch_stride_Bias0, int64_t batch_stride_Scale0, typename OutputOp0::Params output_op_0 = typename OutputOp0::Params(), typename OutputOp1::Params output_op_1 = typename OutputOp1::Params(), int *workspace = nullptr ): mode(mode), problem_size_0(problem_size_0), problem_size_1(problem_size_1), grid_tiled_shape(grid_tiled_shape), swizzle_log_tile(ThreadblockSwizzle::get_log_tile(grid_tiled_shape)), params_A0(ref_A0.layout()), ref_A0(ref_A0), params_B0(ref_B0.layout()), ref_B0(ref_B0), params_C0(ref_C0.layout()), ref_C0(ref_C0), ref_Scale0(ref_Scale0), ref_Bias0(ref_Bias0), params_B1(ref_B1.layout()), ref_B1(ref_B1), params_C1(ref_C1.layout()), ref_C1(ref_C1), params_D1(ref_D1.layout()), ref_D1(ref_D1), batch_stride_A0(batch_stride_A0), batch_stride_B0(batch_stride_B0), batch_stride_B1(batch_stride_B1), batch_stride_C1(batch_stride_C1), batch_stride_D1(batch_stride_D1), batch_stride_Bias0(batch_stride_Bias0), batch_stride_Scale0(batch_stride_Scale0), output_op_0(output_op_0), output_op_1(output_op_1) { int total_gemm_k_iterations_0 = (problem_size_0.k() + B2bMma::Shape0::kK - 1) / B2bMma::Shape0::kK; int gemm_k_iterations_0 = (total_gemm_k_iterations_0 + grid_tiled_shape.k() - 1) / grid_tiled_shape.k(); gemm_k_size_0 = gemm_k_iterations_0 * B2bMma::Shape0::kK; int total_gemm_k_iterations_1 = (problem_size_1.k() + B2bMma::Shape1::kK - 1) / B2bMma::Shape1::kK; int gemm_k_iterations_1 = (total_gemm_k_iterations_1 + grid_tiled_shape.k() - 1) / grid_tiled_shape.k(); gemm_k_size_1 = gemm_k_iterations_1 * B2bMma::Shape1::kK; semaphore = workspace; } }; struct GroupedParams { cutlass::gemm::GemmCoord* problem_size_0; cutlass::gemm::GemmCoord* problem_size_1; cutlass::gemm::GemmCoord* grid_tiled_shape; typename B2bMma::IteratorA0::TensorRef* ref_A0; typename B2bMma::IteratorB0::TensorRef* ref_B0; typename Epilogue::OutputTileIterator::TensorRef* ref_C0; typename B2bMma::IteratorAccumulatorScaleBias::TensorRef* ref_Scale0; typename B2bMma::IteratorAccumulatorScaleBias::TensorRef* ref_Bias0; typename B2bMma::IteratorB1::TensorRef* ref_B1; typename Epilogue::OutputTileIterator::TensorRef* ref_C1; typename Epilogue::OutputTileIterator::TensorRef* ref_D1; // Epilogue params remain constant across all problmes in the group. Thus, // the parameter here is not a pointer. 
typename OutputOp0::Params output_op_0; typename OutputOp1::Params output_op_1; using ProblemVisitor = typename detail::ProblemVisitorOrDefault<B2bMma, ThreadblockSwizzle>::value; typename ProblemVisitor::Params problem_visitor; int threadblock_count; int* workspace; CUTLASS_HOST_DEVICE GroupedParams() {} CUTLASS_HOST_DEVICE GroupedParams( GroupedArguments const &args, void *workspace = nullptr, int tile_count = 0 ) : problem_size_0(args.problem_size_0), problem_size_1(args.problem_size_1), ref_A0(args.ref_A0), ref_B0(args.ref_B0), ref_C0(args.ref_C0), ref_Scale0(args.ref_Scale0), ref_Bias0(args.ref_Bias0), ref_B1(args.ref_B1), ref_C1(args.ref_C1), ref_D1(args.ref_D1), output_op_0(args.epilogue0), output_op_1(args.epilogue1), problem_visitor(args.problem_size_0, args.problem_size_1, args.problem_count, workspace, tile_count), threadblock_count(args.threadblock_count), workspace(reinterpret_cast<int*>(workspace)) {} CUTLASS_HOST_DEVICE void transpose() { // Only row-major outputs are currently supported, so no transpose is performed } /// Returns non-grouped paramaters to be used as input to the kernel-level /// operator for the problem indicated by problem_visitor. CUTLASS_HOST_DEVICE Params to_single_params(const ProblemVisitor& problem_visitor) const { GemmCoord problem_size0 = problem_visitor.problem_size0(); GemmCoord problem_size1 = problem_visitor.problem_size1(); int32_t idx = problem_visitor.problem_index(); GemmCoord grid_shape = problem_visitor.grid_shape(problem_size1); return Params( cutlass::gemm::GemmUniversalMode::kGemm, problem_size0, problem_size1, grid_shape, ref_A0[idx], ref_B0[idx], ref_C0[idx], ref_Scale0[idx], ref_Bias0[idx], ref_B1[idx], ref_C1[idx], ref_D1[idx], 0, 0, 0, 0, 0, 0, 0, // Batched B2B GEMMs within the grouped kernel are currently unsupported output_op_0, output_op_1, workspace ); } }; /// Shared memory storage structure union SharedStorage { typename B2bMma::B2bMmaSharedStorage main_loop; typename Epilogue::SharedStorage epilogue; }; // // Methods // CUTLASS_HOST_DEVICE B2bGemm() { } /// Determines whether kernel satisfies alignment static Status can_implement( cutlass::gemm::GemmCoord const & problem_size_0, cutlass::gemm::GemmCoord const & problem_size_1, typename B2bMma::IteratorA0::TensorRef ref_A0, typename B2bMma::IteratorB0::TensorRef ref_B0, typename Epilogue::OutputTileIterator::TensorRef ref_C0, typename B2bMma::IteratorB1::TensorRef ref_B1, typename Epilogue::OutputTileIterator::TensorRef ref_C1, typename Epilogue::OutputTileIterator::TensorRef ref_D1) { static int const kAlignmentA = B2bMma::IteratorA0::AccessType::kElements; static int const kAlignmentB = B2bMma::IteratorB0::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; if (!TensorRef_aligned(ref_A0, kAlignmentA)) { return Status::kErrorMisalignedOperand; } if (!TensorRef_aligned(ref_B0, kAlignmentB)) { return Status::kErrorMisalignedOperand; } if (!TensorRef_aligned(ref_C0, kAlignmentC)) { return Status::kErrorMisalignedOperand; } if (!TensorRef_aligned(ref_B1, kAlignmentB)) { return Status::kErrorMisalignedOperand; } if (!TensorRef_aligned(ref_C1, kAlignmentC)) { return Status::kErrorMisalignedOperand; } if (!TensorRef_aligned(ref_D1, kAlignmentC)) { return Status::kErrorMisalignedOperand; } if ((problem_size_0.m() % kAlignmentA) || (problem_size_0.k() % kAlignmentA) || (problem_size_0.n() % kAlignmentB) || (problem_size_0.k() % kAlignmentB) || (problem_size_0.m() % kAlignmentC) || (problem_size_0.n() % kAlignmentC) || 
(problem_size_1.m() % kAlignmentA) || (problem_size_1.k() % kAlignmentA) || (problem_size_1.n() % kAlignmentB) || (problem_size_1.k() % kAlignmentB) || (problem_size_1.m() % kAlignmentC) || (problem_size_1.n() % kAlignmentC)) { return Status::kErrorMisalignedOperand; } // Determine if fusion sizes are valid if(problem_size_0.m() != problem_size_1.m()) return Status::kErrorInvalidProblem; if(problem_size_0.n() != problem_size_1.k()) return Status::kErrorInvalidProblem; if(problem_size_0.n() > B2bMma::Shape0::kN) return Status::kErrorInvalidProblem; if(problem_size_1.n() > B2bMma::Shape1::kN) return Status::kErrorInvalidProblem; return Status::kSuccess; } /// Executes one GEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { ThreadblockSwizzle threadblock_swizzle; run_with_swizzle(params, shared_storage, threadblock_swizzle); } /// Executes one GEMM with an externally-provided swizzling function CUTLASS_DEVICE void run_with_swizzle(Params const &params, SharedStorage &shared_storage, ThreadblockSwizzle& threadblock_swizzle) { cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { return; } ElementA0 *ptr_A0 = static_cast<ElementA0 *>(params.ref_A0.data()); ElementB0 *ptr_B0 = static_cast<ElementB0 *>(params.ref_B0.data()); ElementB1 *ptr_B1 = static_cast<ElementB1 *>(params.ref_B1.data()); ScaleBiasData *ptr_Bias0 = static_cast<ScaleBiasData *>(params.ref_Bias0.data()); ScaleBiasData *ptr_Scale0 = static_cast<ScaleBiasData *>(params.ref_Scale0.data()); int offset_k_0 = 0; int offset_k_1 = 0; int problem_size_k_0 = params.problem_size_0.k(); int problem_size_k_1 = params.problem_size_1.k(); if (params.mode == GemmUniversalMode::kGemm) { // Problem size is a function of threadblock index in the K dimension problem_size_k_0 = min( problem_size_k_0, (threadblock_tile_offset.k() + 1) * params.gemm_k_size_0); // Problem size is a function of threadblock index in the K dimension problem_size_k_1 = min( problem_size_k_1, (threadblock_tile_offset.k() + 1) * params.gemm_k_size_1); offset_k_0 = threadblock_tile_offset.k() * params.gemm_k_size_0; offset_k_1 = threadblock_tile_offset.k() * params.gemm_k_size_1; } else if (params.mode == GemmUniversalMode::kBatched) { ptr_A0 += threadblock_tile_offset.k() * params.batch_stride_A0; ptr_B0 += threadblock_tile_offset.k() * params.batch_stride_B0; ptr_B1 += threadblock_tile_offset.k() * params.batch_stride_B1; ptr_Bias0 += threadblock_tile_offset.k() * params.batch_stride_Bias0; ptr_Scale0 += threadblock_tile_offset.k() * params.batch_stride_Scale0; } // Compute initial location in logical coordinates cutlass::MatrixCoord tb_offset_A0{ threadblock_tile_offset.m() * B2bMma::Shape0::kM, offset_k_0, }; cutlass::MatrixCoord tb_offset_B0{ offset_k_0, threadblock_tile_offset.n() * B2bMma::Shape0::kN }; cutlass::MatrixCoord tb_offset_B1{ offset_k_1, threadblock_tile_offset.n() * B2bMma::Shape1::kN }; // Compute threadblock-scoped matrix multiply-add int gemm_k_iterations_0 = (problem_size_k_0 - tb_offset_A0.column() + B2bMma::Shape0::kK - 1) / B2bMma::Shape0::kK; // Compute threadblock-scoped matrix multiply-add // int gemm_k_iterations_1 = (problem_size_k_1 - tb_offset_B1.row() + B2bMma::Shape1::kK - 1) / B2bMma::Shape1::kK; // Compute position within threadblock int thread_idx = threadIdx.x; // Construct 
iterators to A and B operands typename B2bMma::IteratorA0 iterator_A0( params.params_A0, ptr_A0, {params.problem_size_0.m(), problem_size_k_0}, thread_idx, tb_offset_A0); typename B2bMma::IteratorB0 iterator_B0( params.params_B0, ptr_B0, {problem_size_k_0, params.problem_size_0.n()}, thread_idx, tb_offset_B0); typename B2bMma::IteratorB1 iterator_B1( params.params_B1, ptr_B1, {problem_size_k_1, params.problem_size_1.n()}, thread_idx, tb_offset_B1); // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. int warp_idx = __shfl_sync(0xffffffff, threadIdx.x / 32, 0); int lane_idx = threadIdx.x % 32; // Construct iterators to accumulator scale/bias vector typename B2bMma::IteratorAccumulatorScaleBias iterator_Scale0( ptr_Scale0, {1, params.problem_size_0.n()}, thread_idx, warp_idx, MatrixCoord( 0, threadblock_tile_offset.n() * B2bMma::Shape0::kN ) ); typename B2bMma::IteratorAccumulatorScaleBias iterator_Bias0( ptr_Bias0, {1, params.problem_size_0.n()}, thread_idx, warp_idx, MatrixCoord( 0, threadblock_tile_offset.n() * B2bMma::Shape0::kN ) ); // // Main loop // OutputOp0 output_op_0(params.output_op_0); if (cutlass::gemm::threadblock::detail::IsGroupedSwizzle<ThreadblockSwizzle>::value) { // Wait for all threads to finish their epilogue phases from the previous tile. __syncthreads(); } // Construct thread-scoped matrix multiply B2bMma b2bMma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx, params.problem_size_0.n()); typename B2bMma::FragmentC0 src_accum; typename B2bMma::FragmentC1 accumulators; src_accum.clear(); accumulators.clear(); // Compute threadblock-scoped matrix multiply-add b2bMma(gemm_k_iterations_0, accumulators, iterator_A0, iterator_B0, iterator_Scale0, iterator_Bias0, iterator_B1, src_accum, output_op_0); // // Epilogue // OutputOp1 output_op_1(params.output_op_1); // // Masked tile iterators constructed from members // threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); //assume identity swizzle MatrixCoord threadblock_offset( threadblock_tile_offset.m() * B2bMma::Shape1::kM, threadblock_tile_offset.n() * B2bMma::Shape1::kN ); int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); ElementC *ptr_C1 = static_cast<ElementC *>(params.ref_C1.data()); ElementC *ptr_D1 = static_cast<ElementC *>(params.ref_D1.data()); // Construct the semaphore. Semaphore semaphore(params.semaphore + block_idx, thread_idx); if (params.mode == GemmUniversalMode::kGemm) { // If performing a reduction via split-K, fetch the initial synchronization if (params.grid_tiled_shape.k() > 1) { // Fetch the synchronization lock initially but do not block. semaphore.fetch(); // Indicate which position in a serial reduction the output operator is currently updating output_op_1.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); } } else if (params.mode == GemmUniversalMode::kBatched) { ptr_C1 += threadblock_tile_offset.k() * params.batch_stride_C1; ptr_D1 += threadblock_tile_offset.k() * params.batch_stride_D1; } // Tile iterator loading from source tensor. typename Epilogue::OutputTileIterator iterator_C1( params.params_C1, ptr_C1, params.problem_size_1.mn(), thread_idx, threadblock_offset ); // Tile iterator writing to destination tensor. 
typename Epilogue::OutputTileIterator iterator_D1( params.params_D1, ptr_D1, params.problem_size_1.mn(), thread_idx, threadblock_offset ); Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Wait on the semaphore - this latency may have been covered by iterator construction if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { // For subsequent threadblocks, the source matrix is held in the 'D' tensor. if (threadblock_tile_offset.k()) { iterator_C1 = iterator_D1; } semaphore.wait(threadblock_tile_offset.k()); __threadfence(); } // Execute the epilogue operator to update the destination tensor. epilogue(output_op_1, iterator_D1, accumulators, iterator_C1); // // Release the semaphore // if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { int lock = 0; if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { // The final threadblock resets the semaphore for subsequent grids. lock = 0; } else { // Otherwise, the semaphore is incremented lock = threadblock_tile_offset.k() + 1; } __threadfence(); semaphore.release(lock); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass
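The serial split-K path above is easy to miss on a first read: each threadblock along the K dimension waits on cutlass::Semaphore for its partition index, performs its epilogue update, and then releases the lock to the next partition (the last one resets it to zero). The standalone CUDA sketch below mirrors only that hand-off protocol with a plain integer lock and a scalar accumulator; the kernel, the lock layout, and the scalar reduction are illustrative assumptions for this sketch, not CUTLASS's Semaphore implementation.

#include <cuda_runtime.h>
#include <cstdio>

// Atomic read of the lock value (atomicAdd with 0 acts as a plain atomic load).
__device__ int load_lock(int *lock) { return atomicAdd(lock, 0); }

__global__ void serial_splitk_reduce(float const *partials, float *out, int *lock, int k_partitions) {
  int k = blockIdx.x;                        // one threadblock per k-partition
  if (threadIdx.x == 0) {
    while (load_lock(lock) != k) { }         // wait until the previous partition releases to us
    __threadfence();                         // make the previous partition's writes visible
    volatile float *acc = out;               // serialized read-modify-write guarded by the lock
    *acc = *acc + partials[k];
    __threadfence();                         // publish our update before handing off the lock
    atomicExch(lock, (k + 1 == k_partitions) ? 0 : k + 1);  // last partition resets the semaphore
  }
}

int main() {
  int const kPartitions = 4;
  float h_partials[kPartitions] = {1.f, 2.f, 3.f, 4.f};
  float *d_partials = nullptr, *d_out = nullptr;
  int *d_lock = nullptr;
  cudaMalloc(&d_partials, sizeof(h_partials));
  cudaMalloc(&d_out, sizeof(float));
  cudaMalloc(&d_lock, sizeof(int));
  cudaMemcpy(d_partials, h_partials, sizeof(h_partials), cudaMemcpyHostToDevice);
  cudaMemset(d_out, 0, sizeof(float));
  cudaMemset(d_lock, 0, sizeof(int));

  serial_splitk_reduce<<<kPartitions, 32>>>(d_partials, d_out, d_lock, kPartitions);

  float result = 0.f;
  cudaMemcpy(&result, d_out, sizeof(float), cudaMemcpyDeviceToHost);
  printf("serialized sum = %f (expected 10)\n", result);

  cudaFree(d_partials);
  cudaFree(d_out);
  cudaFree(d_lock);
  return 0;
}

In the kernel above, the guarded critical section is the whole epilogue tile update (with output_op_1 aware of its k-partition), not a single scalar, but the fetch/wait/release ordering is the same.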
cutlass/examples/13_two_tensor_op_fusion/kernel/b2b_gemm.h/0
{ "file_path": "cutlass/examples/13_two_tensor_op_fusion/kernel/b2b_gemm.h", "repo_id": "cutlass", "token_count": 12865 }
5
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** This example shows how to run CUTLASS's convolution kernels based on the Implicit GEMM algorithm, that use the Tensor Cores on an NVIDIA Ampere GPU. Writing a single high-performance convolution kernel is hard enough, let alone writing kernels that perform well for multiple problem sizes and use good software abstractions. CUTLASS provides simplified abstractions to compose multiple sections of a convolution kernel. When used properly, the kernels can reach peak GPU performance. CUTLASS divides a kernel into hierarchical composable sections for each level of the GPU hardware hierarchy: thread, warp, and threadblock. Each section computes on its own tile shape, with each higher level's tile shape being composed from lower-level tile shapes. Multiple thread tiles (the tile shape each thread computes) can be used to form warp tiles (the tile shape each warp computes), and multiple warp tiles can be used to compute threadblock tiles (the tile shape computed by a threadblock). In this example, we split variable initialization into two parts. 1. Setting up data properties: describes how tensors are laid out in the memory and how the kernel can view them (logical to physical mapping) 2. Setting up computation properties: describes how the above tensors will be used to compute the output of convolution We begin by setting up the data types of all the input and output elements of a convolution. A convolution computes C = alpha * Conv2dFprop(A, B) + beta * C, so we set up data types for the input tensor A, weights tensor B, output tensor C, and the scaling factors alpha and beta. 
CUTLASS divides the convolution into two parts: the "mainloop" that computes X = Conv2dFprop(A, B), and the "epilogue" that computes C = alpha * X + beta * C. The epilogue is an element-wise operation on X and C. In this case, it is a linear combination, but other epilogues are possible. In this example, we want * the scaling factors alpha and beta to be float, * the elements of A and B to be cutlass::half_t (a 16-bit floating-point type), * the elements of C to be float, and * intermediate sums to be accumulated in float. We convey this to the CUTLASS kernel by setting the following template parameters. * alpha and beta: ElementComputeEpilogue = float * Elements of input tensor A: ElementInputA = cutlass::half_t * Elements of input tensor B: ElementInputB = cutlass::half_t * Elements of output tensor C: ElementOutput = float * Accumulation type: ElementAccumulator = float Next, we describe the layout of the input and output tensors. We convey this to the CUTLASS kernel by setting the following template parameters. * Layout of input tensor A: LayoutInputA = TensorNHWC * Layout of input tensor B: LayoutInputB = TensorNHWC * Layout of output tensor C: LayoutOutput = TensorNHWC After that, we set up rules to compute the epilogue. The epilogue in this case is a simple linear combination C = alpha * X + beta * C. Thus, we set the kernel's template parameter EpilogueOp to LinearCombination. LinearCombination itself has template parameters: * the element type of the output tensor (ElementOutput), * the number of elements per vector memory access (8), * the data type of the accumulator (ElementAccumulator), * and the data type used to compute the linear combination (ElementComputeEpilogue). We then define the tile shapes that each level of the computation uses. We define these as types that encode the tile shapes as compile-time integer values. Each shape expresses the dimensions M x N x K. Here, the letters refer to the dimensions of a matrix-matrix multiply. * ThreadblockShape defines the threadblock tile shape as 128 x 128 x 64. * WarpShape defines the warp tile shape as 64 x 64 x 64. * InstructionShape defines the MMA (matrix multiply-accumulate) operation shape as 16 x 8 x 16. These types become template arguments of the kernel properties type cutlass::conv::kernel::DefaultConv2dFprop. The kernel uses these shapes to deduce the number of threads needed per threadblock, the required amount of shared memory, the internal layouts needed to access shared memory without bank conflicts, and many other properties that the kernel needs for good performance. CUTLASS deduces all these properties automatically, so that users don't have to. DefaultConv2dFprop accepts other template parameters that describe things like the target CUDA SM architecture. CUTLASS also supports multiple MMA pipelines in a threadblock. An MMA pipeline constitutes the whole process of loading input data from global memory to shared memory, loading data from shared memory to registers, doing matrix multiplication, and storing the result to global memory. The below flow sequence shows a typical MMA multistage pipeline (see include/cutlass/conv/threadblock/implicit_gemm_multistage.h). tensor in global memory --cp_async--> tile in shared memory --smem loads--> registers --mma--> registers --global stores--> output to global memory On NVIDIA Ampere, the kernel uses `cp_async` to build a multistage software pipeline. This helps it better hide latency. 
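  For readers unfamiliar with cp_async, the snippet below is a minimal standalone illustration
  of the same idea using CUDA's C-level pipeline primitives rather than CUTLASS's iterator
  machinery: every thread issues an asynchronous 16-byte global-to-shared copy, the copies are
  committed as one batch, and computation waits on that batch before touching the tile. The
  kernel and buffer names are made up for this sketch, and the hardware cp.async path requires
  an SM80 (Ampere) or newer GPU.

  #include <cuda_pipeline_primitives.h>
  #include <cuda_runtime.h>
  #include <cstdio>

  __global__ void async_tile_sum(float const *global_in, float *out) {
    __align__(16) __shared__ float tile[256];                  // threadblock staging tile
    int idx = threadIdx.x * 4;                                 // 4 floats = 16 bytes per thread
    __pipeline_memcpy_async(&tile[idx], &global_in[idx], 16);  // asynchronous global -> shared copy
    __pipeline_commit();                                       // close this batch of async copies
    __pipeline_wait_prior(0);                                  // wait until the batch has landed
    __syncthreads();                                           // tile is now visible to all threads
    if (threadIdx.x == 0) {
      float sum = 0.f;
      for (int i = 0; i < 256; ++i) { sum += tile[i]; }
      *out = sum;
    }
  }

  int main() {
    float h_in[256], h_out = 0.f;
    for (int i = 0; i < 256; ++i) { h_in[i] = 1.f; }
    float *d_in = nullptr, *d_out = nullptr;
    cudaMalloc(&d_in, sizeof(h_in));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
    async_tile_sum<<<1, 64>>>(d_in, d_out);                    // 64 threads x 16 bytes = one 1 KB tile
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("tile sum = %f (expected 256)\n", h_out);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
  }

  The multistage mainloop in CUTLASS keeps several such batches in flight at once, which is
  what allows it to overlap global memory traffic with Tensor Core math.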
At this point, we can define the actual CUTLASS kernel type as the alias ImplicitGemm, a specialization of cutlass::conv::device::ImplicitGemmConvolution. The latter accepts the kernel properties type alias Conv2dFpropKernel as its one template argument. This example then sets up a test problem and arguments to the kernel. We use CUTLASS utilities to allocate the input and output tensors and fill them with sample input data. We then create the kernel arguments as an instance of ImplicitGemm::Arguments. The arguments include the problem size (N = 1, H = 64, W = 64, C = 128), filter size (K = 64, R = 3, S = 3, C = 128), padding, strides, dilation, tensors, alpha, beta, and the split k-dimension factor. We also query CUTLASS if the kernel we instantiated requires any memory for scratch space. If yes, we reserve scratch space and pass it along with other arguments to initialize the CUTLASS kernel. After lauching the CUTLASS kernel, this example runs a reference convolution kernel (from CUTLASS utilities) to check correctness. */ #include <iostream> #include <fstream> #include <sstream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/conv/kernel/default_conv2d_fprop.h" #include "cutlass/conv/device/implicit_gemm_convolution.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/convolution.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // Data types for input and output tensors // and computation between elements using ElementAccumulator = float; // Data type of accumulator using ElementComputeEpilogue = float; // Data type of epilogue computation (alpha, beta) using ElementInputA = cutlass::half_t; // Data type of elements in input tensor using ElementInputB = cutlass::half_t; // Data type of elements in input tensor using ElementOutput = float; // Data type of elements in output tensor using LayoutInputA = cutlass::layout::TensorNHWC; using LayoutInputB = cutlass::layout::TensorNHWC; using LayoutOutput = cutlass::layout::TensorNHWC; // Whether to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // SM architecture number using SmArch = cutlass::arch::Sm80; // Threadblock tile shape using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 64>; // Warp tile shape using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; // MMA (Tensor Core instruction, in this case) tile shape using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // How the kernel schedules threadblocks using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // Number of pipeline stages to use constexpr int NumStages = 3; // Which iterator algorithm to use: Analytic or Optimized static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized; // Is the output packed or strided // Use kStride if using strided output static cutlass::conv::StrideSupport const OutputStride = cutlass::conv::StrideSupport::kUnity; // The epilogue part of the kernel using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. 
128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized // memory access. This becomes the vector width of // math instructions in the epilogue too. ElementAccumulator, // Data type of accumulator ElementComputeEpilogue>; // Data type for alpha/beta in linear combination // Kernel properties type using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ThreadblockShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, cutlass::arch::OpMultiplyAdd, IteratorAlgorithm, OutputStride >::Kernel; // Type of the actual kernel using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>; ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; cutlass::Tensor4DCoord input_size; cutlass::Tensor4DCoord filter_size; cutlass::Tensor4DCoord padding; cutlass::MatrixCoord conv_stride; cutlass::MatrixCoord dilation; bool reference_check; bool measure_performance; int iterations; bool save_workspace; ElementComputeEpilogue alpha; ElementComputeEpilogue beta; bool benchmark; std::string tag; Options(): help(false), input_size(1, 32, 32, 32), filter_size(32, 3, 3, 32), padding(1, 1, 1, 1), conv_stride(1, 1), dilation(1, 1), reference_check(false), measure_performance(true), iterations(20), save_workspace(false), alpha(1), beta(0), benchmark(false) { } // Verify that the problem size is compatible with CUTLASS's convolution implementation bool valid() { // // CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently, // all pointers, strides, and tensor extents must be divisible by 8 elements. 
// int const kAlignment = 8; if ((input_size.c() % kAlignment) || (filter_size.n() % kAlignment)) { // misaligned tensors return false; } // Invalid padding if ((padding.h() != filter_size.h() / 2) || (padding.w() != filter_size.w() / 2)) { return false; } return true; } /// Update input and filter sizes void update( cutlass::Tensor4DCoord input_size, cutlass::Tensor4DCoord filter_size) { this->input_size = input_size; this->filter_size = filter_size; padding.n() = filter_size.h() / 2; padding.h() = filter_size.h() / 2; padding.w() = filter_size.w() / 2; padding.c() = filter_size.w() / 2; } // Parse command-line arguments void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } if (cmd.check_cmd_line_flag("ref-check")) { reference_check = true; } if (cmd.check_cmd_line_flag("perf-check")) { measure_performance = true; } if (cmd.check_cmd_line_flag("save-workspace")) { save_workspace = true; } if (cmd.check_cmd_line_flag("benchmark")) { benchmark = true; } cmd.get_cmd_line_argument("n", input_size.n()); cmd.get_cmd_line_argument("h", input_size.h()); cmd.get_cmd_line_argument("w", input_size.w()); cmd.get_cmd_line_argument("c", input_size.c()); cmd.get_cmd_line_argument("k", filter_size.n()); cmd.get_cmd_line_argument("r", filter_size.h()); cmd.get_cmd_line_argument("s", filter_size.w()); filter_size.c() = input_size.c(); cmd.get_cmd_line_argument("alpha", alpha); cmd.get_cmd_line_argument("beta", beta); cmd.get_cmd_line_argument("iterations", iterations); cmd.get_cmd_line_argument("tag", tag); if (filter_size.h() == 3 && filter_size.w() == 3) { padding = {1, 1, 1, 1}; } else { filter_size.h() = 1; filter_size.w() = 1; padding = {0, 0, 0, 0}; } } /// Print an explanation of the command-line arguments std::ostream & print_usage(std::ostream &out) const { out << "16_ampere_tensorop_conv2dfprop example\n\n" << " This example uses Ampere's Tensor Core operators on F16 data types\n" << " to compute forward convolution on tensors of layout NHWC.\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --n=<int> Input tensor extent N\n" << " --h=<int> Input tensor extent H\n" << " --w=<int> Input tensor extent W\n" << " --c=<int> Input tensor extent C\n" << " --k=<int> Filter extent K\n" << " --r=<int> Filter extent R\n" << " --s=<int> Filter extent S\n\n" << " --alpha=<float> Epilogue scalar alpha\n" << " --beta=<float> Epilogue scalar beta\n\n" << " --ref-check If set (true), reference check on the host is computed\n" << " --perf-check If set (true), performance is measured.\n" << " --benchmark If set (true), performance benchmarking on several layers and batch-size.\n" << " --iterations=<int> Number of profiling iterations to perform.\n" << " --save-workspace If set, workspace is written to a text file.\n" << " --tag=<string> String to replicate across the first column in the results table\n"; out << "\n\nExamples:\n\n" << "$ ./examples/16_ampere_tensorop_conv2dfprop/16_ampere_tensorop_conv2dfprop --n=32 --h=224 --w=224 --c=128 --k=256 --r=1 --s=1\n\n" << "$ ./examples/16_ampere_tensorop_conv2dfprop/16_ampere_tensorop_conv2dfprop --n=1 --h=224 --w=224 --c=32 --k=32 --r=3 --s=3 --ref-check\n\n"; return out; } /// Computes the output tensor size (NPQK) cutlass::Tensor4DCoord output_size() const { return cutlass::Tensor4DCoord( input_size.n(), (input_size.h() + padding.n() + padding.h() - filter_size.h()) / conv_stride.row() + 1, (input_size.w() + padding.w() + padding.c() - 
filter_size.w()) / conv_stride.column() + 1, filter_size.n()); } /// Compute performance in Gflop/s /// /// Gflop/s stands for billions (10^9) of /// floating-point operations per second (Gflop/s). double gflops(double runtime_s) const { // Number of multiply-adds = NPQK * CRS int64_t fmas = output_size().product() * int64_t(filter_size.h() * filter_size.w() * filter_size.c()); // Two flops per multiply-add return 2.0 * double(fmas) / double(1.0e9) / runtime_s; } }; struct Result { double runtime_ms; double gflops; cutlass::Status status; cutlass::Status reference_check; cudaError_t error; Result(): runtime_ms(0), gflops(0), status(cutlass::Status::kSuccess), reference_check(cutlass::Status::kInvalid), error(cudaSuccess) { } static std::ostream& print_header(std::ostream &out, Options const &options) { if (!options.tag.empty()) { out << "Name,"; } out << "Layer,N,H,W,C,K,R,S,Runtime,GFLOPs"; return out; } std::ostream & print(std::ostream &out, int idx, Options const &options) { if (!options.tag.empty()) { out << options.tag << ","; } out << "conv_" << idx << "," << options.input_size.n() << "," << options.input_size.h() << "," << options.input_size.w() << "," << options.input_size.c() << "," << options.filter_size.n() << "," << options.filter_size.h() << "," << options.filter_size.w() << "," << runtime_ms << "," << gflops; return out; } }; /// Runs one benchmark Result profile_convolution(Options const &options) { Result result; // // Allocate host-device tensors using the CUTLASS Utilities. // cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.input_size); cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.filter_size); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.output_size()); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.output_size()); cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.output_size()); // // Initialize tensors // // Fill tensor A on host with uniformly distributed random data cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(7), ElementInputA(-8), 0); // Fill tensor B on host with uniformly distributed random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(7), ElementInputB(-8), 0); // Fill tensor C on host with uniformly distributed random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(7), ElementOutput(-8), 0); // Fill tensor D on host with zeros cutlass::reference::host::TensorFill( tensor_d.host_view()); // Fill tensor D for reference on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // // Define arguments for CUTLASS Convolution // cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation; // Split K dimension into 1 partitions int split_k_slices = 1; // Construct Conv2dProblemSize with user defined output size cutlass::conv::Conv2dProblemSize problem_size( options.input_size, options.filter_size, options.padding, options.conv_stride, options.dilation, options.output_size(), mode, split_k_slices ); // Construct ImplicitGemm::Argument structure with conv2d // problem size, data pointers, and epilogue values typename ImplicitGemm::Arguments arguments{ problem_size, tensor_a.device_ref(), tensor_b.device_ref(), tensor_c.device_ref(), 
tensor_d.device_ref(), {options.alpha, options.beta}, }; // // Initialize CUTLASS Convolution // ImplicitGemm implicit_gemm_op; size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); result.status = implicit_gemm_op.can_implement(arguments); CUTLASS_CHECK(result.status); result.status = implicit_gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(result.status); // // Launch initialized CUTLASS kernel // result.status = implicit_gemm_op(); CUTLASS_CHECK(result.status); // // Optional reference check // if (options.reference_check) { std::cout << "Verification on host...\n"; // Compute with reference implementation cutlass::reference::host::Conv2dFprop< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementAccumulator >( problem_size, tensor_a.host_ref(), tensor_b.host_ref(), tensor_c.host_ref(), tensor_ref_d.host_ref(), options.alpha, options.beta ); // Check if CUTLASS kernel and reference kernel produced the same output tensor_d.sync_host(); bool passed = cutlass::reference::host::TensorEquals( tensor_d.host_view(), tensor_ref_d.host_view()); if (!passed) { result.reference_check = cutlass::Status::kErrorInternal; std::cout << "ERROR - results miscompared.\n"; } else { result.reference_check = cutlass::Status::kSuccess; std::cout << "Passed.\n"; } } else { result.reference_check = cutlass::Status::kInvalid; } if (options.save_workspace) { std::stringstream ss; ss << "16_ampere_workspace_conv2dfprop_" << options.input_size.n() << "x" << options.input_size.h() << "x" << options.input_size.w() << "x" << options.input_size.c() << "_" << options.filter_size.n() << "x" << options.filter_size.h() << "x" << options.filter_size.w() << "x" << options.filter_size.c() << ".dat"; std::ofstream output_workspace(ss.str()); output_workspace << "Input = \n" << tensor_a.host_view() << "\n\n" << "Filters = \n" << tensor_b.host_view() << "\n\n"; if (options.reference_check) { output_workspace << "Reference = \n" << tensor_ref_d.host_view() << "\n\n"; } output_workspace << "Computed = \n" << tensor_d.host_view() << std::endl; std::cout << "Results written to '" << ss.str() << "'." << std::endl; } // // Performance measurement // if (options.measure_performance) { cudaEvent_t events[2]; for (auto & event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } } // Record an event at the start of a series of convolution operations. result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Launch a sequence of implicit GEMM operations on the device. for (int iteration = 0; iteration < options.iterations; ++iteration) { result.status = implicit_gemm_op(); CUTLASS_CHECK(result.status); } // Record an event when the convolutions have been launched. result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Wait for work on the device to complete. 
result.error = cudaEventSynchronize(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Measure elapsed runtime. float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Print average run time and floating-point throughput (Gflop/s). result.runtime_ms = double(runtime_ms) / double(options.iterations); result.gflops = options.gflops(result.runtime_ms / 1000.0); // Cleanup for (auto event : events) { (void)cudaEventDestroy(event); } } return result; } int main(int argc, char const **args) { bool notSupported = false; // Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples. if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } cudaDeviceProp props; CUDA_CHECK(cudaGetDeviceProperties(&props, 0)); if (!(props.major >= 8)) { std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { return 0; } Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } if (options.benchmark) { // Benchmark several layers int batch_sizes[] = {1, 32, 64, 128, 256, 512}; struct Benchmark { int h, w, c, k, r, s; } layers[] = { {56, 56, 64, 256, 1, 1}, {56, 56, 64, 64, 1, 1}, {56, 56, 64, 64, 3, 3}, {56, 56, 256, 64, 1, 1}, {56, 56, 256, 512, 1, 1}, {56, 56, 256, 128, 1, 1}, {28, 28, 128, 128, 3, 3}, {28, 28, 128, 512, 1, 1}, {28, 28, 512, 128, 1, 1}, {28, 28, 512, 1024, 1, 1}, {28, 28, 512, 256, 1, 1}, {14, 14, 256, 256, 3, 3}, {14, 14, 256, 1024, 1, 1}, {14, 14, 1024, 256, 1, 1}, {14, 14, 1024, 2048, 1, 1}, {14, 14, 1024, 512, 1, 1}, {7, 7, 512, 512, 3, 3}, }; Result::print_header(std::cout, options) << std::endl; int idx = 1; for (auto const &layer : layers) { for (auto N : batch_sizes) { options.update({N, layer.h, layer.w, layer.c}, {layer.k, layer.r, layer.s, layer.c}); Result result = profile_convolution(options); result.print(std::cout, idx, options) << std::endl; } ++idx; } } else { // Execute one problem size if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } Result result = profile_convolution(options); Result::print_header(std::cout, options) << std::endl; result.print(std::cout, 1, options) << std::endl; } return 0; }
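As a cross-check on the gflops() computation above: it follows from the implicit-GEMM view of Conv2dFprop, in which the GEMM extents are M = N*P*Q, N = K, and K = C*R*S, so the multiply-add count is N*P*Q*K * C*R*S. The standalone sketch below spells out that mapping using the conventional output-size formula; the struct and function names are illustrative only.

#include <cstdint>
#include <cstdio>

struct Conv2dFpropSizes {
  int n, h, w, c;                           // input tensor N x H x W x C
  int k, r, s;                              // filter tensor K x R x S x C (C matches the input)
  int pad_h, pad_w;
  int stride_h, stride_w;
  int dil_h, dil_w;
};

int64_t implied_gemm_flops(Conv2dFpropSizes const &cs) {
  // Output extents P and Q implied by padding, stride, and dilation.
  int p = (cs.h + 2 * cs.pad_h - cs.dil_h * (cs.r - 1) - 1) / cs.stride_h + 1;
  int q = (cs.w + 2 * cs.pad_w - cs.dil_w * (cs.s - 1) - 1) / cs.stride_w + 1;
  // Implicit GEMM extents for forward propagation.
  int64_t gemm_m = int64_t(cs.n) * p * q;
  int64_t gemm_n = cs.k;
  int64_t gemm_k = int64_t(cs.c) * cs.r * cs.s;
  return 2 * gemm_m * gemm_n * gemm_k;      // two flops per multiply-add
}

int main() {
  // The problem size used in the description above: N=1, H=W=64, C=128, K=64, R=S=3, unit stride.
  Conv2dFpropSizes cs{1, 64, 64, 128, 64, 3, 3, 1, 1, 1, 1, 1, 1};
  printf("implied GEMM flops per convolution: %lld\n", static_cast<long long>(implied_gemm_flops(cs)));
  return 0;
}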
cutlass/examples/16_ampere_tensorop_conv2dfprop/ampere_tensorop_conv2dfprop.cu/0
{ "file_path": "cutlass/examples/16_ampere_tensorop_conv2dfprop/ampere_tensorop_conv2dfprop.cu", "repo_id": "cutlass", "token_count": 10130 }
6
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief GEMM Grouped Example. This workload computes a batch of GEMM operations with distinct problem sizes. Pointers to matrices in Global Memory are passed to the kernel in array (also held in Global Memory). Similarly, leading dimensions and problem sizes are stored in arrays in GMEM. This differs from "Batched Array" GEMM because the size of each GEMM problem in the Grouped GEMM concept may be distinct. This benchmark program initializes a workspace with random problem sizes for a given number of groups. Command line options enable overriding M, N, and/or K dimensions with uniform values to model problems more similar to the traditional batched GEMM. Additionally, problem sizes are collected and binned to compute the same problem as a series of conventional batched GEMMs (setup for this problem is not timed). This demonstrates the performance enhancement achieved by implementing a specialized grouped GEMM kernel. 
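    Concretely, this means the host prepares one device-resident array per per-group quantity
    before launch, roughly as in the sketch below. The struct, the helper, and the use of float
    operands are illustrative assumptions for this sketch, not the actual interface of
    cutlass::gemm::device::GemmGrouped included below.

    #include <cstdint>
    #include <vector>
    #include <cuda_runtime.h>
    #include "cutlass/gemm/gemm.h"                     // cutlass::gemm::GemmCoord

    // One device array per per-group quantity; a single kernel launch reads them all.
    struct GroupedGemmArgumentsSketch {
      cutlass::gemm::GemmCoord *problem_sizes;         // [groups] M, N, K of each GEMM
      float **ptr_A;                                   // [groups] operand A pointer of each GEMM
      float **ptr_B;                                   // [groups] operand B pointer of each GEMM
      float **ptr_D;                                   // [groups] output pointer of each GEMM
      int64_t *lda;                                    // [groups] leading dimensions
      int64_t *ldb;
      int64_t *ldd;
      int problem_count;
    };

    // Copy a host-side per-group array into device memory and return the device pointer.
    template <typename T>
    T *to_device(std::vector<T> const &host) {
      T *device = nullptr;
      cudaMalloc(&device, host.size() * sizeof(T));
      cudaMemcpy(device, host.data(), host.size() * sizeof(T), cudaMemcpyHostToDevice);
      return device;
    }

    int main() {
      // Three groups with distinct problem sizes, which is what distinguishes grouped from batched GEMM.
      std::vector<cutlass::gemm::GemmCoord> problems = {
        cutlass::gemm::GemmCoord(1024, 256, 520),
        cutlass::gemm::GemmCoord(520, 264, 1024),
        cutlass::gemm::GemmCoord(96, 48, 1024)
      };
      GroupedGemmArgumentsSketch args{};
      args.problem_sizes = to_device(problems);
      args.problem_count = int(problems.size());
      // ptr_A / ptr_B / ptr_D and the leading dimensions would be uploaded the same way
      // after each group's operands have been allocated on the device.
      cudaFree(args.problem_sizes);
      return 0;
    }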
Examples: # Runs a grouped GEMM with 100 random problem sizes $ ./examples/24_gemm_grouped/24_gemm_grouped --groups=100 # Runs a grouped GEMM with 100 random problem sizes (with GEMM-K dimension equal to 1024) $ ./examples/24_gemm_grouped/24_gemm_grouped --groups=100 --k=1024 --verbose=true # Runs a grouped GEMM that is equivalent to a batched GEMM $ ./examples/24_gemm_grouped/24_gemm_grouped --groups=100 --m=2048 --n=1024 --k=1024 --verbose=true # Execute Grouped GEMM and profile with NSight $ nv-nsight-cu-cli ./examples/24_gemm_grouped/24_gemm_grouped --m=256 --n=256 --k=256 --verbose=true \ --iterations=1 --reference-check=false */ ///////////////////////////////////////////////////////////////////////////////////////////////// #include <chrono> #include <iostream> #include <fstream> #include <sstream> #include <vector> #include <map> #include <unordered_map> #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/kernel/gemm_grouped.h" #include "cutlass/gemm/kernel/default_gemm_grouped.h" #include "cutlass/gemm/device/gemm_grouped.h" #include "cutlass/gemm/device/gemm_universal.h" #include "cutlass/util/command_line.h" #include "cutlass/util/distribution.h" #include "cutlass/util/device_memory.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/gemm_complex.h" #include "cutlass/util/reference/device/gemm_complex.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/device/tensor_fill.h" #include "cutlass/util/reference/host/tensor_norm.h" ///////////////////////////////////////////////////////////////////////////////////////////////// /// Result structure struct Result { double runtime_ms; double initialization_time_ms; double gflops; cutlass::Status status; cudaError_t error; bool passed; // // Methods // Result( double runtime_ms = 0, double initialization_time_ms = 0, double gflops = 0, cutlass::Status status = cutlass::Status::kSuccess, cudaError_t error = cudaSuccess ): runtime_ms(runtime_ms), initialization_time_ms(initialization_time_ms), gflops(gflops), status(status), error(error), passed(true) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Hash function for cutlass::gemm::GemmCoord struct HashGemmCoord { size_t operator()(cutlass::gemm::GemmCoord const &problem) const { std::hash<int> hasher; return (hasher(problem.m() * 3)) ^ (hasher(1 + problem.n() * 5)) ^ (hasher(2 + problem.k() * 7)); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; bool error; bool reference_check; bool profile_initialization; bool sort_problems; std::vector<cutlass::gemm::GemmCoord> problem_sizes; // problem size bins std::unordered_map< cutlass::gemm::GemmCoord, std::vector<int32_t>, HashGemmCoord> problem_bins; int alignment; int problem_count; int iterations; int cuda_streams; bool verbose; float alpha; float beta; std::string benchmark_path; std::string output_tag; std::ofstream output_file; using GroupScheduleMode = cutlass::gemm::kernel::GroupScheduleMode; std::vector<GroupScheduleMode> scheduler_modes; std::unordered_map<std::string, GroupScheduleMode> str_to_scheduler_mode = { {"kDeviceOnly", GroupScheduleMode::kDeviceOnly}, {"kHostPrecompute", GroupScheduleMode::kHostPrecompute} }; struct GroupScheduleModeHash { size_t 
operator()(GroupScheduleMode m) const { return static_cast<size_t>(m); } }; std::unordered_map<GroupScheduleMode, std::string, GroupScheduleModeHash> scheduler_mode_to_str = { {GroupScheduleMode::kDeviceOnly, "kDeviceOnly"}, {GroupScheduleMode::kHostPrecompute, "kHostPrecompute"} }; std::vector<GroupScheduleMode> all_scheduler_modes = {GroupScheduleMode::kDeviceOnly, GroupScheduleMode::kHostPrecompute}; // // Methods // Options(): help(false), error(false), alignment(8), reference_check(true), profile_initialization(false), sort_problems(false), problem_count(15), iterations(20), cuda_streams(0), verbose(false), alpha(1), beta(), scheduler_modes({GroupScheduleMode::kDeviceOnly}) { } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; return; } cmd.get_cmd_line_argument("alignment", alignment, 8); cmd.get_cmd_line_argument("groups", problem_count, 15); cmd.get_cmd_line_argument("alpha", alpha, 1.0f); cmd.get_cmd_line_argument("beta", beta, 0.0f); cmd.get_cmd_line_argument("iterations", iterations, 20); cmd.get_cmd_line_argument("streams", cuda_streams, 0); cmd.get_cmd_line_argument("verbose", verbose, false); cmd.get_cmd_line_argument("reference-check", reference_check, true); cmd.get_cmd_line_argument("profile-initialization", profile_initialization, false); cmd.get_cmd_line_argument("sort-problems", sort_problems, false); cmd.get_cmd_line_argument("benchmark", benchmark_path); std::vector<std::string> scheduler_mode_strs; cmd.get_cmd_line_arguments("scheduler-modes", scheduler_mode_strs); if (!scheduler_mode_strs.empty()) { scheduler_modes.clear(); if (scheduler_mode_strs.size() == 1 && scheduler_mode_strs[0] == "all") { scheduler_modes = all_scheduler_modes; } else { for (std::string precomp_str : scheduler_mode_strs) { auto it = str_to_scheduler_mode.find(precomp_str); if (it != str_to_scheduler_mode.end()) { scheduler_modes.push_back(it->second); } else if (precomp_str == "all") { std::cerr << "Flag --scheduler-modes=all must not contain other scheduler modes in list." << std::endl; error = true; return; } else { std::cerr << "Unrecognized scheduler mode '" << precomp_str << "'" << std::endl; error = true; return; } } } } std::string output_path; cmd.get_cmd_line_argument("tag", output_tag); cmd.get_cmd_line_argument("output_file", output_path); if (!output_path.empty()) { std::ios_base::openmode open_mode = std::ios_base::out; std::ifstream input_file(output_path.c_str()); if (input_file.good()) { open_mode = std::ios_base::app; input_file.close(); } output_file.open(output_path.c_str(), open_mode); if (output_file.good() && open_mode != std::ios_base::app) { output_file << "Tag,Provider,Kind,Groups,Runtime,GFLOPs\n"; } } // Decide how to initialize the problems if (!benchmark_path.empty()) { if (!benchmark_problems()) { error = true; problem_sizes.clear(); return; } } else { randomize_problems(cmd); } // Post-process the problem sizes bin_problems(); } void randomize_problems(cutlass::CommandLine &cmd) { // // For now, randomly choose the problem sizes. 
// int cmd_line_m = -1; int cmd_line_n = -1; int cmd_line_k = -1; cmd.get_cmd_line_argument("m", cmd_line_m); cmd.get_cmd_line_argument("n", cmd_line_n); cmd.get_cmd_line_argument("k", cmd_line_k); problem_sizes.reserve(problem_count); for (int i = 0; i < problem_count; ++i) { int m = cmd_line_m; int n = cmd_line_n; int k = cmd_line_k; if (m < 1) { m = alignment * ((rand() % 256) + 1); } if (n < 1) { n = alignment * ((rand() % 256) + 1); } if (k < 1) { k = alignment * ((rand() % 256) + 1); } cutlass::gemm::GemmCoord problem(m, n, k); problem_sizes.push_back(problem); } } /// Load a benchmark bool benchmark_problems() { std::ifstream file(benchmark_path); if (!file.good()) { return false; } while (file.good()) { int idx = -1; std::string extent_str; file >> idx >> extent_str; if (idx < 0 || extent_str.empty()) { break; } cutlass::gemm::GemmCoord extent; std::vector<std::string> tokens; cutlass::CommandLine::tokenize(tokens, extent_str, 'x'); for (int i = 0; i < int(tokens.size()); ++i) { int x = std::atoi(tokens.at(i).c_str()); // round up if (x % alignment) { x += (alignment - (x % alignment)); } extent.at(i) = x; } if (extent.product()) { problem_sizes.push_back(extent); } } return true; } /// Post processes the problems void bin_problems() { problem_bins.clear(); problem_count = int(problem_sizes.size()); // // Insert the problem sizes into a sorted container class. This is *NOT* necessary // to run the CUTLASS kernel, but it enables the execution of cublas's batched GEMM. // for (int i = 0; i < int(problem_sizes.size()); ++i) { auto it = problem_bins.find(problem_sizes.at(i)); if (it == problem_bins.end()) { problem_bins.insert({problem_sizes.at(i), std::vector<int32_t>({i}) }); } else { it->second.push_back(i); } } } /// Prints the usage statement. std::ostream & print_usage(std::ostream &out) const { out << "24_gemm_grouped\n\n" << " This example profiles the performance of a 'grouped' GEMM kernel. This is similar to batched GEMM\n" << " in that multiple, independent GEMMs are computed by one grid launch. It differs in that each\n" << " 'group' may compute a unique problem size. Problem sizes and pointers to matrices are both stored\n" << " in device Global Memory and loaded by the kernel.\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --benchmark=<str> Executes a benchmark problem size.\n" << " --output_file=<str> Path to a CSV file to output results. If it exists already, results are appended.\n" << " --tag=<str> String tag to prepend to the CSV file.\n" << " --groups=<int> Number of individual GEMM problems (default: --groups=15)\n" << " --m=<int> Sets the M dimension for all groups. Otherwise, it is selected randomly\n" << " --n=<int> Sets the N dimension for all groups. Otherwise, it is selected randomly\n" << " --k=<int> Sets the K dimension for all groups. 
Otherwise, it is selected randomly\n" << " --alpha=<f32> Epilogue scalar alpha (real part)\n" << " --beta=<f32> Epilogue scalar beta (real part)\n" << " --scheduler-modes=<str> List of scheduler modes to be profile for grouped GEMM scheduler (default: --scheduler_modes=kDeviceOnly)\n" << " --iterations=<int> Number of profiling iterations to perform.\n" << " --reference-check=<bool> If true, performs reference check.\n" << " --verbose=<bool> If true, prints problem sizes and batching structure.\n" << " --profile-initialization=<bool> If true, profiles the device-level kernel's initialization.\n" << " --sort-problems=<bool> If true, sorts problem sizes in descending order of GEMM-K dimension.\n"; out << "\n\nExamples:\n\n" << "# Runs a grouped GEMM with 100 random problem sizes\n" << "$ ./examples/24_gemm_grouped/24_gemm_grouped --groups=100\n\n" << "# Runs a grouped GEMM with 100 random problem sizes (with GEMM-K dimension equal to 1024)\n" << "$ ./examples/24_gemm_grouped/24_gemm_grouped --groups=100 --k=1024 --verbose=true\n\n" << "# Runs a grouped GEMM that is equivalent to a batched GEMM\n" << "$ ./examples/24_gemm_grouped/24_gemm_grouped --groups=100 --m=2048 --n=1024 --k=1024 --verbose=true\n\n" << "# Runs a grouped GEMM with each different scheduler mode\n" << "$ ./examples/24_gemm_grouped/24_gemm_grouped --scheduler-modes=all\n\n" << "# Runs a grouped GEMM with each different scheduler mode and profiles host-side initialization time\n" << "$ ./examples/24_gemm_grouped/24_gemm_grouped --scheduler-modes=all --profile-initialization=true\n\n" << "# Runs a grouped GEMM problem given an externally supplied benchmark file. This is a text file in which\n" << "# Each line contains a unique group index and an MxNxK triple indicating problemsize.\n" << "#\n" << "# For example, assume the following are the contents of 'problems.txt'\n" << "#\n" << "# 0 1024x256x520\n" << "# 1 520x264x1024\n" << "# 2 96x48x1024\n" << "#\n" << "$ ./examples/24_gemm_grouped/24_gemm_grouped --benchmark=problems.txt\n\n" << "# Execute Grouped GEMM and profile with NSight\n" << "$ nv-nsight-cu-cli ./examples/24_gemm_grouped/24_gemm_grouped --m=256 --n=256 --k=256 --verbose=true --iterations=1 --reference-check=false\n\n"; return out; } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Number of real-valued multiply-adds int64_t fmas = int64_t(); for (auto const & problem : problem_sizes) { fmas += problem.product(); } // Two flops per multiply-add return 2.0 * double(fmas) / double(1.0e9) / runtime_s; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Gemm> class BaseTestbed { public: // // Type definitions // using ElementA = typename Gemm::ElementA; using ElementB = typename Gemm::ElementB; using ElementC = typename Gemm::ElementC; using ElementAccumulator = typename Gemm::ElementAccumulator; using EpilogueOutputOp = typename Gemm::GemmKernel::Epilogue::OutputOp; using ElementCompute = typename EpilogueOutputOp::ElementCompute; using LayoutA = typename Gemm::LayoutA; using LayoutB = typename Gemm::LayoutB; using LayoutC = typename Gemm::LayoutC; using MatrixCoord = typename LayoutC::TensorCoord; // // Data members // Options & options; /// Initialization cutlass::Distribution::Kind init_A; cutlass::Distribution::Kind init_B; cutlass::Distribution::Kind init_C; uint32_t seed; cutlass::DeviceAllocation<cutlass::gemm::GemmCoord> problem_sizes_device; std::vector<int64_t> offset_A; std::vector<int64_t> offset_B; 
std::vector<int64_t> offset_C; std::vector<int64_t> offset_D; std::vector<int64_t> lda_host; std::vector<int64_t> ldb_host; std::vector<int64_t> ldc_host; std::vector<int64_t> ldd_host; cutlass::DeviceAllocation<int64_t> lda; cutlass::DeviceAllocation<int64_t> ldb; cutlass::DeviceAllocation<int64_t> ldc; cutlass::DeviceAllocation<int64_t> ldd; cutlass::DeviceAllocation<ElementA> block_A; cutlass::DeviceAllocation<ElementB> block_B; cutlass::DeviceAllocation<ElementC> block_C; cutlass::DeviceAllocation<ElementC> block_D; cutlass::DeviceAllocation<ElementA *> ptr_A; cutlass::DeviceAllocation<ElementB *> ptr_B; cutlass::DeviceAllocation<ElementC *> ptr_C; cutlass::DeviceAllocation<ElementC *> ptr_D; BaseTestbed( Options &options_, cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint32_t seed_ = 3080 ): options(options_), init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } int problem_count() const { return options.problem_count; } /// Helper to initialize a tensor view template <typename Element> void initialize_tensor( Element *ptr, size_t capacity, cutlass::Distribution::Kind dist_kind, uint32_t seed) { if (dist_kind == cutlass::Distribution::Uniform) { Element scope_max, scope_min; int bits_input = cutlass::sizeof_bits<Element>::value; int bits_output = cutlass::sizeof_bits<typename Gemm::ElementC>::value; if (bits_input == 1) { scope_max = 2; scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = -2; } else if (bits_output == 16) { if (cutlass::sizeof_bits<ElementAccumulator>::value <= 16) { scope_max = 5; scope_min = -5; } else { scope_max = 8; scope_min = -8; } } else { scope_max = 8; scope_min = -8; } cutlass::reference::device::BlockFillRandomUniform( ptr, capacity, seed, scope_max, scope_min, 0); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::device::BlockFillRandomGaussian( ptr, capacity, seed, Element(), Element(0.5f)); } else if (dist_kind == cutlass::Distribution::Sequential) { // Fill with increasing elements cutlass::reference::device::BlockFillSequential( ptr, capacity, Element(1), Element()); } else { // Fill with all 1s cutlass::reference::device::BlockFillSequential( ptr, capacity, Element(), Element(1)); } } /// Allocates device-side data void allocate() { int64_t total_elements_A = 0; int64_t total_elements_B = 0; int64_t total_elements_C = 0; int64_t total_elements_D = 0; lda_host.resize(problem_count()); ldb_host.resize(problem_count()); ldc_host.resize(problem_count()); ldd_host.resize(problem_count()); for (int32_t i = 0; i < problem_count(); ++i) { auto problem = options.problem_sizes.at(i); lda_host.at(i) = LayoutA::packed({problem.m(), problem.k()}).stride(0); ldb_host.at(i) = LayoutB::packed({problem.k(), problem.n()}).stride(0); ldc_host.at(i) = LayoutC::packed({problem.m(), problem.n()}).stride(0); ldd_host.at(i) = LayoutC::packed({problem.m(), problem.n()}).stride(0); offset_A.push_back(total_elements_A); offset_B.push_back(total_elements_B); offset_C.push_back(total_elements_C); offset_D.push_back(total_elements_D); int64_t elements_A = problem.m() * problem.k(); int64_t elements_B = problem.k() * problem.n(); int64_t elements_C = problem.m() * problem.n(); int64_t elements_D = problem.m() * problem.n(); total_elements_A += elements_A; total_elements_B += elements_B; total_elements_C += elements_C; total_elements_D += elements_D; } 
lda.reset(problem_count()); ldb.reset(problem_count()); ldc.reset(problem_count()); ldd.reset(problem_count()); block_A.reset(total_elements_A); block_B.reset(total_elements_B); block_C.reset(total_elements_C); block_D.reset(total_elements_D); } /// Initializes device-side data void initialize() { problem_sizes_device.reset(problem_count()); problem_sizes_device.copy_from_host(options.problem_sizes.data()); lda.copy_from_host(lda_host.data()); ldb.copy_from_host(ldb_host.data()); ldc.copy_from_host(ldc_host.data()); ldd.copy_from_host(ldd_host.data()); // // Assign pointers // std::vector<ElementA *> ptr_A_host(problem_count()); std::vector<ElementB *> ptr_B_host(problem_count()); std::vector<ElementC *> ptr_C_host(problem_count()); std::vector<ElementC *> ptr_D_host(problem_count()); for (int32_t i = 0; i < problem_count(); ++i) { ptr_A_host.at(i) = block_A.get() + offset_A.at(i); ptr_B_host.at(i) = block_B.get() + offset_B.at(i); ptr_C_host.at(i) = block_C.get() + offset_C.at(i); ptr_D_host.at(i) = block_D.get() + offset_D.at(i); } ptr_A.reset(problem_count()); ptr_A.copy_from_host(ptr_A_host.data()); ptr_B.reset(problem_count()); ptr_B.copy_from_host(ptr_B_host.data()); ptr_C.reset(problem_count()); ptr_C.copy_from_host(ptr_C_host.data()); ptr_D.reset(problem_count()); ptr_D.copy_from_host(ptr_D_host.data()); // // Initialize the problems of the workspace // initialize_tensor(block_A.get(), block_A.size(), init_A, seed * 2021); initialize_tensor(block_B.get(), block_B.size(), init_B, seed * 2022); initialize_tensor(block_C.get(), block_C.size(), init_C, seed * 2023); cutlass::reference::device::BlockFillSequential( block_D.get(), block_D.size(), ElementC(), ElementC()); } /// Verifies the result is a GEMM bool verify() { bool passed = true; for (int32_t i = 0; i < problem_count(); ++i) { cutlass::gemm::GemmCoord problem = options.problem_sizes.at(i); LayoutA layout_A(lda_host.at(i)); LayoutB layout_B(ldb_host.at(i)); LayoutC layout_C(ldc_host.at(i)); LayoutC layout_D(ldd_host.at(i)); MatrixCoord extent_A{problem.m(), problem.k()}; MatrixCoord extent_B{problem.k(), problem.n()}; MatrixCoord extent_C{problem.m(), problem.n()}; cutlass::TensorView<ElementA, LayoutA> view_A(block_A.get() + offset_A.at(i), layout_A, extent_A); cutlass::TensorView<ElementB, LayoutB> view_B(block_B.get() + offset_B.at(i), layout_B, extent_B); cutlass::TensorView<ElementC, LayoutC> view_C(block_C.get() + offset_C.at(i), layout_C, extent_C); cutlass::DeviceAllocation<ElementC> block_Ref(layout_D.capacity(extent_C)); cutlass::TensorView<ElementC, LayoutC> view_Ref_device(block_Ref.get(), layout_D, extent_C); // Reference GEMM cutlass::reference::device::GemmComplex< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementCompute, ElementAccumulator >( problem, options.alpha, view_A, Gemm::kTransformA, view_B, Gemm::kTransformB, options.beta, view_C, view_Ref_device, ElementAccumulator(0) ); // Copy to host memory std::vector<ElementC> matrix_D(layout_D.capacity(extent_C)); std::vector<ElementC> matrix_Ref(layout_D.capacity(extent_C)); cutlass::device_memory::copy_to_host(matrix_D.data(), block_D.get() + offset_D.at(i), matrix_D.size()); cutlass::device_memory::copy_to_host(matrix_Ref.data(), block_Ref.get(), matrix_D.size()); cutlass::TensorView<ElementC, LayoutC> view_D( matrix_D.data(), layout_D, extent_C); cutlass::TensorView<ElementC, LayoutC> view_Ref(matrix_Ref.data(), layout_D, extent_C); // Reference check passed = cutlass::reference::host::TensorEquals(view_D, view_Ref); if (!passed) { std::cerr 
<< "\n***\nError - problem " << i << " failed the QA check\n***\n" << std::endl; return passed; } } return passed; } }; template <typename Gemm> class TestbedBatched : BaseTestbed<Gemm> { public: TestbedBatched( Options &options_, cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint32_t seed_ = 3080 ): BaseTestbed<Gemm>(options_, init_A_, init_B_, init_C_, seed_) {} void print_problem_sizes() { std::cout << std::endl; size_t bin_idx = 0; size_t problem_count_check = 0; std::cout << "Conventionally executed as " << this->options.problem_bins.size() << " batched GEMMs:\n"; for (auto const & bin : this->options.problem_bins) { std::cout << " [" << bin_idx << "]: " << bin.first.m() << "-by-" << bin.first.n() << "-by-" << bin.first.k() << ", batch count: " << bin.second.size() << "\n"; ++bin_idx; problem_count_check += bin.second.size(); } if (problem_count_check != size_t(this->problem_count())) { std::cout << "\n***\nERROR in BINNING LOGIC!\n***\n" << std::endl; } std::cout << std::endl; } /// Executes a batched kernel and measures runtime Result profile() { std::cout << "Batched GEMM:\n" << "====================================================" << std::endl; Result result; result.passed = false; // Initialize the problem this->allocate(); this->initialize(); if (this->options.verbose) { print_problem_sizes(); } // // Prepare batched GEMM environment // int32_t effective_streams = (this->options.cuda_streams ? this->options.cuda_streams : 1); // Array of leading dimensions used by batched GEMM calls std::vector<cutlass::gemm::GemmCoord> bin_problem_sizes; std::vector<int32_t> bin_count; std::vector<int32_t> bin_ldm_A; std::vector<int32_t> bin_ldm_B; std::vector<int32_t> bin_ldm_C; std::vector<int32_t> bin_start; std::vector<void const *> ptr_A_batched_host; std::vector<void const *> ptr_B_batched_host; std::vector<void *> ptr_C_batched_host; for (auto const & bin : this->options.problem_bins) { int first_idx = bin.second.front(); bin_problem_sizes.push_back(this->options.problem_sizes.at(first_idx)); bin_count.push_back(int32_t(bin.second.size())); bin_ldm_A.push_back(static_cast<int32_t>(this->lda_host.at(first_idx))); bin_ldm_B.push_back(static_cast<int32_t>(this->ldb_host.at(first_idx))); bin_ldm_C.push_back(static_cast<int32_t>(this->ldc_host.at(first_idx))); if (ptr_A_batched_host.size() % 2) { ptr_A_batched_host.push_back(nullptr); ptr_B_batched_host.push_back(nullptr); ptr_C_batched_host.push_back(nullptr); } bin_start.push_back(int32_t(ptr_A_batched_host.size())); for (int idx : bin.second) { if (bin_problem_sizes.back() != this->options.problem_sizes.at(idx)) { std::cerr << "Error - failed to group problems.\n"; return result; } if (bin_ldm_A.back() != this->lda_host.at(idx)) { std::cerr << "Error - failed to group problems.\n"; return result; } if (bin_ldm_B.back() != this->ldb_host.at(idx)) { std::cerr << "Error - failed to group problems.\n"; return result; } if (bin_ldm_C.back() != this->ldc_host.at(idx)) { std::cerr << "Error - failed to group problems.\n"; return result; } ptr_A_batched_host.push_back(this->block_A.get() + this->offset_A.at(idx)); ptr_B_batched_host.push_back(this->block_B.get() + this->offset_B.at(idx)); ptr_C_batched_host.push_back(this->block_D.get() + this->offset_C.at(idx)); } } // Array of GMEM pointers used by batched array GEMM calls cutlass::DeviceAllocation<void const *> ptr_A_batched; 
cutlass::DeviceAllocation<void const *> ptr_B_batched; cutlass::DeviceAllocation<void *> ptr_C_batched; ptr_A_batched.reset(ptr_A_batched_host.size()); ptr_B_batched.reset(ptr_A_batched_host.size()); ptr_C_batched.reset(ptr_A_batched_host.size()); ptr_A_batched.copy_from_host(ptr_A_batched_host.data()); ptr_B_batched.copy_from_host(ptr_B_batched_host.data()); ptr_C_batched.copy_from_host(ptr_C_batched_host.data()); // // Create CUDA streams to maximize concurrency of batched-array GEMM kernels // std::vector<cudaStream_t> cuda_streams; // // Warmup run // if (this->options.cuda_streams) { for (int i = 0; i < this->options.cuda_streams; ++i) { cudaStream_t stream; result.error = cudaStreamCreate(&stream); if (result.error != cudaSuccess) { std::cerr << "Failed to create CUDA stream." << std::endl; return result; } cuda_streams.push_back(stream); } } else { cuda_streams.push_back(nullptr); } // Use 'D' for the in/out workspace this->block_D.copy_from_device(this->block_C.get()); for (int bin_idx = 0; bin_idx < int32_t(bin_problem_sizes.size()); ++bin_idx) { cutlass::gemm::GemmCoord const & problem = bin_problem_sizes[bin_idx]; int32_t batch_count = bin_count[bin_idx]; int32_t bin_start_idx = bin_start[bin_idx]; int32_t lda = bin_ldm_A[bin_idx]; int32_t ldb = bin_ldm_B[bin_idx]; int32_t ldc = bin_ldm_C[bin_idx]; void const ** ptr_A_array = ptr_A_batched.get() + bin_start[bin_idx]; void const ** ptr_B_array = ptr_B_batched.get() + bin_start[bin_idx]; void ** ptr_C_array = ptr_C_batched.get() + bin_start[bin_idx]; // // Initialize the CUTLASS GEMM operator // // Configure the GEMM arguments typename Gemm::EpilogueOutputOp::Params epilogue_op(this->options.alpha, this->options.beta); typename Gemm::Arguments arguments{ cutlass::gemm::GemmUniversalMode::kArray, problem, batch_count, epilogue_op, (void const *)ptr_A_array, (void const *)ptr_B_array, (void const *)ptr_C_array, (void *)ptr_C_array, int64_t(), int64_t(), int64_t(), int64_t(), int64_t(lda), int64_t(ldb), int64_t(ldc), int64_t(ldc) }; Gemm gemm_op; cutlass::Status status = gemm_op.initialize(arguments); if (status != cutlass::Status::kSuccess) { std::cerr << "CUTLASS error on line " << __LINE__ << std::endl; return result; } status = gemm_op(); if (status != cutlass::Status::kSuccess) { std::cerr << "CUTLASS error on line " << __LINE__ << std::endl; return result; } } // // Wait for completion // result.error = cudaDeviceSynchronize(); if (result.error != cudaSuccess) { std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error); return result; } // // Construct events // cudaEvent_t events[2]; for (auto & event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return -1; } } // // Wait for completion // result.error = cudaDeviceSynchronize(); if (result.error != cudaSuccess) { std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error); return result; } // Record an event at the start of a series of GEMM operations result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // // Run profiling loop // int last_stream_idx = 0; for (int iter = 0; iter < this->options.iterations; ++iter) { for (int bin_idx = 0; bin_idx < int32_t(bin_problem_sizes.size()); ++bin_idx) { cutlass::gemm::GemmCoord const & problem = bin_problem_sizes[bin_idx]; int32_t 
batch_count = bin_count[bin_idx]; int32_t bin_start_idx = bin_start[bin_idx]; int32_t lda = bin_ldm_A[bin_idx]; int32_t ldb = bin_ldm_B[bin_idx]; int32_t ldc = bin_ldm_C[bin_idx]; void const ** ptr_A_array = ptr_A_batched.get() + bin_start[bin_idx]; void const ** ptr_B_array = ptr_B_batched.get() + bin_start[bin_idx]; void ** ptr_C_array = ptr_C_batched.get() + bin_start[bin_idx]; last_stream_idx = (bin_idx % effective_streams); // // Initialize the CUTLASS GEMM operator // // Configure the GEMM arguments typename Gemm::EpilogueOutputOp::Params epilogue_op(this->options.alpha, this->options.beta); typename Gemm::Arguments arguments{ cutlass::gemm::GemmUniversalMode::kArray, problem, batch_count, epilogue_op, (void const *)ptr_A_array, (void const *)ptr_B_array, (void const *)ptr_C_array, (void *)ptr_C_array, int64_t(), int64_t(), int64_t(), int64_t(), int64_t(lda), int64_t(ldb), int64_t(ldc), int64_t(ldc) }; Gemm gemm_op; cutlass::Status status = gemm_op.initialize(arguments); if (status != cutlass::Status::kSuccess) { std::cerr << "CUTLASS error on line " << __LINE__ << std::endl; return result; } status = gemm_op(cuda_streams[last_stream_idx]); if (status != cutlass::Status::kSuccess) { std::cerr << "CUTLASS error on line " << __LINE__ << std::endl; return result; } } } // // Stop profiling loop // // Record an event when the GEMM operations have been launched. result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // // Wait for work to be completed // result.error = cudaDeviceSynchronize(); if (result.error != cudaSuccess) { std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error); return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Compute average runtime and GFLOPs. 
result.runtime_ms = double(runtime_ms) / double(this->options.iterations); result.gflops = this->options.gflops(result.runtime_ms / 1000.0); // // Cleanup // for (auto event : events) { (void)cudaEventDestroy(event); } for (auto stream : cuda_streams) { if (stream) { (void)cudaStreamDestroy(stream); } } std::cout << " " << this->options.problem_bins.size() << " batched GEMMs launched" << std::endl; std::cout << std::endl; std::cout << " " << "Batched Runtime: " << result.runtime_ms << " ms" << std::endl; std::cout << " " << "Batched GFLOPs: " << result.gflops << std::endl; std::string provider = "CUTLASS"; if (this->options.output_file.good()) { this->options.output_file << this->options.output_tag << "," << provider << ",batched," << this->options.problem_count << "," << result.runtime_ms << "," << result.gflops << std::endl; } result.passed = true; return result; } }; template <typename Gemm_, cutlass::gemm::kernel::GroupScheduleMode GroupScheduleMode_> class TestbedGrouped : BaseTestbed<Gemm_> { public: TestbedGrouped( Options &options_, cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint32_t seed_ = 3080 ): BaseTestbed<Gemm_>(options_, init_A_, init_B_, init_C_, seed_) {} // Redefine GEMM with different GroupScheduleMode_ using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmGrouped< typename Gemm_::ElementA, typename Gemm_::LayoutA, Gemm_::kTransformA, Gemm_::kAlignmentA, typename Gemm_::ElementB, typename Gemm_::LayoutB, Gemm_::kTransformB, Gemm_::kAlignmentB, typename Gemm_::ElementC, typename Gemm_::LayoutC, typename Gemm_::ElementAccumulator, typename Gemm_::OperatorClass, typename Gemm_::ArchTag, typename Gemm_::ThreadblockShape, typename Gemm_::WarpShape, typename Gemm_::InstructionShape, typename Gemm_::EpilogueOutputOp, typename Gemm_::ThreadblockSwizzle, Gemm_::kStages, GroupScheduleMode_>::GemmKernel; using Gemm = cutlass::gemm::device::GemmGrouped<GemmKernel>; /// Verbose printing of problem sizes void print_problem_sizes() { std::cout << std::endl; // Print groups std::cout << this->problem_count() << " groups:\n"; int32_t idx = 0; int64_t total_tiles = 0; for (auto const & problem : this->options.problem_sizes) { int tiles = Gemm::problem_tile_count(problem); total_tiles += tiles; std::cout << " [" << idx << "]: " << problem.m() << "-by-" << problem.n() << "-by-" << problem.k() << " (" << tiles << " threadblock tiles)" << "\n"; ++idx; } std::cout << std::endl; } /// Sort problems in descending order of problem-K dimension void sort_problems() { Gemm::sort_problems(this->options.problem_count, this->options.problem_sizes.data(), this->lda_host.data(), this->ldb_host.data(), this->ldc_host.data(), this->ldd_host.data(), this->offset_A.data(), this->offset_B.data(), this->offset_C.data(), this->offset_D.data()); } /// Executes a grouped kernel and measures runtime Result profile() { std::string sched_mode = this->options.scheduler_mode_to_str.find(GroupScheduleMode_)->second; std::cout << std::endl; std::cout << "Grouped GEMM (CUTLASS) with mode " << sched_mode << ":\n" << "====================================================" << std::endl; Result result; int threadblock_count = Gemm::sufficient(this->options.problem_sizes.data(), this->options.problem_count); // Early exit if (!threadblock_count) { std::cout << "Active CUDA device lacks hardware resources to run CUTLASS Grouped GEMM kernel." 
<< std::endl; return result; } result.passed = false; // Initialize the problem this->allocate(); if (this->options.sort_problems) { sort_problems(); } this->initialize(); if (this->options.verbose) { print_problem_sizes(); } // Configure the GEMM arguments typename Gemm::EpilogueOutputOp::Params epilogue_op(this->options.alpha, this->options.beta); // Configure GEMM arguments typename Gemm::Arguments args( this->problem_sizes_device.get(), this->problem_count(), threadblock_count, epilogue_op, this->ptr_A.get(), this->ptr_B.get(), this->ptr_C.get(), this->ptr_D.get(), this->lda.get(), this->ldb.get(), this->ldc.get(), this->ldd.get(), this->options.problem_sizes.data() ); // Initialize the GEMM object Gemm gemm; size_t workspace_size = gemm.get_workspace_size(args); cutlass::DeviceAllocation<uint8_t> workspace(workspace_size); result.status = gemm.initialize(args, workspace.get()); if (result.status != cutlass::Status::kSuccess) { std::cerr << "Failed to initialize CUTLASS Grouped GEMM kernel." << std::endl; return result; } // Run the grouped GEMM object result.status = gemm.run(); if (result.status != cutlass::Status::kSuccess) { std::cerr << "Failed to run CUTLASS Grouped GEMM kernel." << std::endl; return result; } // Wait for completion result.error = cudaDeviceSynchronize(); if (result.error != cudaSuccess) { std::cerr << "Kernel execution error: " << cudaGetErrorString(result.error); return result; } // // Verify correctness // result.passed = true; if (this->options.reference_check) { result.passed = this->verify(); } // // Warm-up run of the grouped GEMM object // result.status = gemm.run(); if (result.status != cutlass::Status::kSuccess) { std::cerr << "Failed to run CUTLASS Grouped GEMM kernel." << std::endl; return result; } // // Construct events // cudaEvent_t events[2]; for (auto & event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return -1; } } // Record an event at the start of a series of GEMM operations result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // // Run profiling loop // for (int iter = 0; iter < this->options.iterations; ++iter) { gemm(); } // // Stop profiling loop // // Record an event when the GEMM operations have been launched. result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Wait for work on the device to complete. result.error = cudaEventSynchronize(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Compute average runtime and GFLOPs. 
result.runtime_ms = double(runtime_ms) / double(this->options.iterations); result.gflops = this->options.gflops(result.runtime_ms / 1000.0); // // Cleanup // for (auto event : events) { (void)cudaEventDestroy(event); } // Optionally profile initialization if (this->options.profile_initialization) { // Warm up gemm.initialize(args, workspace.get()); auto start_time = std::chrono::high_resolution_clock::now(); for (int32_t i = 0; i < this->options.iterations; ++i) { gemm.initialize(args, workspace.get()); } auto end_time = std::chrono::high_resolution_clock::now(); std::chrono::duration<double, std::milli> duration = end_time - start_time; duration /= double(this->options.iterations); result.initialization_time_ms = duration.count(); } int64_t total_tiles = Gemm::group_tile_count(args); std::cout << " " << total_tiles << " total threadblock tiles." << std::endl; std::cout << std::endl; std::cout << " " << "Grouped Runtime: " << result.runtime_ms << " ms" << std::endl; std::cout << " " << "Grouped GFLOPs: " << result.gflops << std::endl; if (this->options.profile_initialization) { std::cout << " " << "Init Runtime: " << result.initialization_time_ms << " ms" << std::endl; } if (this->options.output_file.good()) { this->options.output_file << this->options.output_tag << ",CUTLASS,grouped-" << sched_mode << "," << this->options.problem_count << "," << result.runtime_ms << "," << result.gflops << std::endl; } std::cout << "\nPassed\n"; return result; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char const **args) { cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (__CUDACC_VER_MAJOR__ < 11 || props.major < 8) { // // This example requires an NVIDIA Ampere-architecture GPU. // std::cout << "CUTLASS's Grouped GEMM example requires a GPU of NVIDIA's Ampere Architecture or " << "later (compute capability 80 or greater).\n"; return 0; } // // Parse options // Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } if (options.error) { std::cerr << "Aborting execution." << std::endl; return -1; } // // Define the Grouped and Batched GEMM types // using ElementA = cutlass::half_t; using ElementB = cutlass::half_t; using ElementOutput = cutlass::half_t; using ElementAccumulator = float; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; // Gemm operator cutlass_tensorop_f16_s16816gemm_f16_128x128_32x4_nt_align8 using GemmBatched = cutlass::gemm::device::GemmUniversal< ElementA, LayoutA, ElementB, LayoutB, ElementOutput, LayoutC, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<128, 128, 32>, cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, cutlass::epilogue::thread::LinearCombination< ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, 4 >; // Define a grouped GEMM kernel with all template parameters set except // for scheduling mode. This will be used as the template for all scheduling // modes executed. 
using GemmKernel = typename cutlass::gemm::kernel::DefaultGemmGrouped< ElementA, LayoutA, cutlass::ComplexTransform::kNone, 8, ElementB, LayoutB, cutlass::ComplexTransform::kNone, 8, ElementOutput, LayoutC, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<128, 128, 32>, cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, cutlass::epilogue::thread::LinearCombination< ElementOutput, 128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator>, // NOTE: Threadblock swizzling is currently not supported by CUTLASS's grouped kernels. // This parameter is passed in at present to match the APIs of other kernels. The parameter // is unused within the kernel. cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle, 4>::GemmKernel; using GemmGrouped = cutlass::gemm::device::GemmGrouped<GemmKernel>; // // Profile it // TestbedBatched<GemmBatched> testbed_batched(options); Result result = testbed_batched.profile(); if (result.error) { return 1; } using GroupScheduleMode = cutlass::gemm::kernel::GroupScheduleMode; for (GroupScheduleMode mode : options.scheduler_modes) { Result result; switch (mode) { case GroupScheduleMode::kDeviceOnly: { TestbedGrouped<GemmGrouped, GroupScheduleMode::kDeviceOnly> runner(options); result = runner.profile(); break; } case GroupScheduleMode::kHostPrecompute: { TestbedGrouped<GemmGrouped, GroupScheduleMode::kHostPrecompute> runner(options); result = runner.profile(); break; } } if (result.error != cudaSuccess) { return 1; } // Override verbose flag to avoid printing duplicate information for each scheduling mode options.verbose = false; } return 0; } /////////////////////////////////////////////////////////////////////////////////////////////////
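/////////////////////////////////////////////////////////////////////////////////////////////////

// Illustrative helper (a condensed sketch, not used by the example above): the minimal sequence
// of calls needed to launch one grouped GEMM, mirroring TestbedGrouped::profile(). It assumes the
// caller has already prepared the per-group device arrays exactly as BaseTestbed::allocate() and
// BaseTestbed::initialize() do: problem sizes, A/B/C/D pointer arrays, and leading dimensions.
// Error handling, verification, and timing are omitted. The function name and parameter list are
// hypothetical; alpha and beta are plain floats because ElementCompute is float for the
// GemmGrouped instantiation defined in main().
template <typename GemmGroupedT>
cutlass::Status run_grouped_gemm_once(
  cutlass::gemm::GemmCoord *problem_sizes_device,  // device array of per-group problem sizes
  cutlass::gemm::GemmCoord *problem_sizes_host,    // host copy (needed by kHostPrecompute scheduling)
  int problem_count,
  typename GemmGroupedT::ElementA **ptr_A,         // device array of per-group A pointers
  typename GemmGroupedT::ElementB **ptr_B,
  typename GemmGroupedT::ElementC **ptr_C,
  typename GemmGroupedT::ElementC **ptr_D,
  int64_t *lda, int64_t *ldb, int64_t *ldc, int64_t *ldd,
  float alpha, float beta) {

  // Number of threadblocks the active device can keep resident for this kernel; zero means the
  // device lacks the resources to run the grouped kernel at all.
  int threadblock_count = GemmGroupedT::sufficient(problem_sizes_host, problem_count);
  if (!threadblock_count) {
    return cutlass::Status::kErrorNotSupported;
  }

  typename GemmGroupedT::EpilogueOutputOp::Params epilogue_op(alpha, beta);

  // Same argument order used by TestbedGrouped::profile() above.
  typename GemmGroupedT::Arguments args(
    problem_sizes_device,
    problem_count,
    threadblock_count,
    epilogue_op,
    ptr_A, ptr_B, ptr_C, ptr_D,
    lda, ldb, ldc, ldd,
    problem_sizes_host);

  GemmGroupedT gemm;
  cutlass::DeviceAllocation<uint8_t> workspace(gemm.get_workspace_size(args));

  cutlass::Status status = gemm.initialize(args, workspace.get());
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  return gemm.run();
}

// Contrast with TestbedBatched::profile() above, which instead issues one GemmUniversal kArray
// launch per problem-size bin and therefore requires all problems within a bin to share the same
// extent and leading dimensions.

/////////////////////////////////////////////////////////////////////////////////////////////////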
cutlass/examples/24_gemm_grouped/gemm_grouped.cu/0
{ "file_path": "cutlass/examples/24_gemm_grouped/gemm_grouped.cu", "repo_id": "cutlass", "token_count": 20904 }
7
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <float.h> #include <stdio.h> #include <cmath> //////////////////////////////////////////////////////////////////////////////// // Debugging functions //////////////////////////////////////////////////////////////////////////////// // Nans & inf detection #define NANCHECK(frag) \ { \ for (size_t _i = 0; _i < frag.size(); ++_i) { \ assert(std::isfinite(float(frag[_i]))); \ assert(!std::isnan(float(frag[_i]))); \ } \ } // Print on the first thread of the first block #if 1 #define PRINT_WARP_ID 0 #define PRINT_LANE_ID 0 #define PRINT_B0_T0(msg, ...) \ if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && \ threadIdx.x == PRINT_LANE_ID && threadIdx.y == PRINT_WARP_ID && \ threadIdx.z == 0) { \ printf(msg "\n", ##__VA_ARGS__); \ } #define PRINT_T0(msg, ...) \ if (threadIdx.x == PRINT_LANE_ID && threadIdx.y == PRINT_WARP_ID && \ threadIdx.z == 0) { \ printf(msg "\n", ##__VA_ARGS__); \ } #define PRINT_TX_LX(msg, ...) 
\ for (int bx = 0; bx < gridDim.x; ++bx) { \ for (int by = 0; by < gridDim.y; ++by) { \ for (int bz = 0; bz < gridDim.z; ++bz) { \ for (int tx = 0; tx < blockDim.x; ++tx) { \ for (int ty = 0; ty < blockDim.y; ++ty) { \ for (int tz = 0; tz < blockDim.z; ++tz) { \ __syncthreads(); \ if (blockIdx.x == bx && blockIdx.y == by && blockIdx.z == bz && \ threadIdx.x == tx && threadIdx.y == ty && \ threadIdx.z == tz) { \ printf( \ "[%d,%d,%d][%d,%d,%d]" msg "\n", \ bx, \ by, \ bz, \ tx, \ ty, \ tz, \ ##__VA_ARGS__); \ } \ } \ } \ } \ } \ } \ } #else #define PRINT_B0_T0 #define PRINT_TX_LX #endif struct __string_view { char const* data; std::size_t size; }; #if __cplusplus >= 201402L template <class T> constexpr __string_view __get_type_name() { char const* p = __PRETTY_FUNCTION__; while (*p++ != '=') ; for (; *p == ' '; ++p) ; char const* p2 = p; int count = 1; for (;; ++p2) { switch (*p2) { case '[': ++count; break; case ']': --count; if (!count) return {p, std::size_t(p2 - p)}; } } return {}; } #else template <class T> constexpr __string_view __get_type_name() { return {"unsupported", 11}; } #endif // Print a given array #define PRINT_ACCUM8_T0_L0_START(name, accum, start) \ PRINT_B0_T0( \ "%s[%d:%d] - {%f, %f, %f, %f, %f, %f, %f, %f}", \ name, \ int(start), \ int(start + 8), \ float(accum[start + 0]), \ float(accum[start + 1]), \ float(accum[start + 2]), \ float(accum[start + 3]), \ float(accum[start + 4]), \ float(accum[start + 5]), \ float(accum[start + 6]), \ float(accum[start + 7])); #define PRINT_ACCUM8_T0_L0(name, accum) PRINT_ACCUM8_T0_L0_START(name, accum, 0) #define PRINT_FRAG_T0_L0(name, frag) \ { \ auto typeStr = __get_type_name<decltype(frag)>(); \ PRINT_B0_T0("printing %s (%s)", name, typeStr.data); \ for (size_t _start = 0; _start < frag.size(); _start += 8) { \ PRINT_ACCUM8_T0_L0_START(" ", frag, _start); \ } \ /*__syncthreads(); \ NANCHECK(frag); */ \ } #define PRINT_ARRAY_T0_L0_INCR(name, array, length, incr) \ { \ PRINT_B0_T0("printing %s (len=%d)", name, int(length)); \ for (int _start = 0; _start < length; _start += incr) { \ PRINT_ACCUM8_T0_L0_START(" ", array, _start); \ } \ } #define PRINT_ARRAY_T0_L0(name, array, length) \ PRINT_ARRAY_T0_L0_INCR(name, array, length, 8) // Print a 4x4 matrix #define PRINT_TENSOR4x4_T0_L0_START(name, ref, start_x, start_y) \ PRINT_B0_T0( \ "%s[%d:%d, %d:%d]:\n %f, %f, %f, %f\n %f, %f, %f, %f\n %f, %f, %f, %f\n %f, %f, %f, %f", \ name, \ int(start_x), \ int(start_x + 4), \ int(start_y), \ int(start_y + 4), \ float(ref.at({start_x + 0, start_y + 0})), \ float(ref.at({start_x + 0, start_y + 1})), \ float(ref.at({start_x + 0, start_y + 2})), \ float(ref.at({start_x + 0, start_y + 3})), \ float(ref.at({start_x + 1, start_y + 0})), \ float(ref.at({start_x + 1, start_y + 1})), \ float(ref.at({start_x + 1, start_y + 2})), \ float(ref.at({start_x + 1, start_y + 3})), \ float(ref.at({start_x + 2, start_y + 0})), \ float(ref.at({start_x + 2, start_y + 1})), \ float(ref.at({start_x + 2, start_y + 2})), \ float(ref.at({start_x + 2, start_y + 3})), \ float(ref.at({start_x + 3, start_y + 0})), \ float(ref.at({start_x + 3, start_y + 1})), \ float(ref.at({start_x + 3, start_y + 2})), \ float(ref.at({start_x + 3, start_y + 3}))); #define PRINT_TENSOR4x4_T0_L0(name, ref) \ PRINT_TENSOR4x4_T0_L0_START(name, ref, 0, 0) #define PRINT_PROBLEM_SIZE(name, ps) \ PRINT_B0_T0( \ "%s.problem_size: {.m=%d, .n=%d, .k=%d}", \ name, \ int(ps.m()), \ int(ps.n()), \ int(ps.k())) template <typename LambdaIterator, typename LaneOffsetT, typename AccumT> CUTLASS_DEVICE void 
print_warp_accum( AccumT accum, LaneOffsetT lane_offset, int32_t num_rows, int32_t num_cols) { bool is_main = blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0; for (int row = 0; row < num_rows; ++row) { for (int col = 0; col < num_cols; ++col) { if (col % 32 == 0) { if (is_main) { printf("\nmat[%3d, %3d:%3d]", row, col, col + 32); } __syncthreads(); } LambdaIterator::iterateRows( lane_offset, [&](int accum_m) {}, [&](int accum_m, int accum_n, int idx) { if (row == accum_m && col == accum_n && (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0)) { printf(" %6.1f", float(accum[idx])); } }, [&](int accum_m) {}); __syncthreads(); } if (is_main) { printf("\n"); } } }
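
//////////////////////////////////////////////////////////////////////////////
// Usage sketch
//////////////////////////////////////////////////////////////////////////////

// Illustrative only: a toy kernel (hypothetical, not referenced by the utilities above) showing
// how the printing macros are invoked from device code. PRINT_B0_T0 emits output solely from
// thread (PRINT_LANE_ID, PRINT_WARP_ID, 0) of block (0, 0, 0), so the kernel can be launched with
// any grid without flooding stdout. If this header is included from several translation units,
// move such a kernel into a .cu file to avoid duplicate definitions.
__global__ void debug_utils_usage_example_kernel() {
  // A stand-in for a per-thread accumulator fragment.
  float accum[8];
  for (int i = 0; i < 8; ++i) {
    accum[i] = float(threadIdx.x) + float(i);
  }

  PRINT_B0_T0(
      "debug_utils example: thread (%d, %d, %d)",
      int(threadIdx.x),
      int(threadIdx.y),
      int(threadIdx.z));

  // Prints "accum[0:8] - {...}" from the selected thread only.
  PRINT_ACCUM8_T0_L0("accum", accum);
}

// Example launch from host code (e.g. a test harness):
//   debug_utils_usage_example_kernel<<<dim3(4, 1, 1), dim3(32, 4, 1)>>>();
//   cudaDeviceSynchronize();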
cutlass/examples/41_fused_multi_head_attention/debug_utils.h/0
{ "file_path": "cutlass/examples/41_fused_multi_head_attention/debug_utils.h", "repo_id": "cutlass", "token_count": 7526 }
8
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/functional.h" #include "cutlass/gemm/warp/mma_simt_tile_iterator.h" #include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm70.h" #include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" #include "cutlass/matrix_shape.h" /* TensorCores have different accumulator layouts. This file provides a class to easily map the accumulator i-th element with the corresponding matrix row/col. 
*/ template <typename T, typename accum_t, int kWarpSize> struct AccumLambdaIteratorSm80 { static_assert( cutlass::platform:: is_same<typename T::Layout, cutlass::layout::RowMajor>::value, "only RowMajor is supported"); using Policy = typename T::Policy; using InstructionShape = typename T::InstructionShape; using OpDelta = typename T::OpDelta; using Shape = typename T::Shape; static int const kElementsPerAccess = InstructionShape::kN / 4; static int const kRowsPerTile = 8; static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile; static cutlass::MatrixCoord CUTLASS_DEVICE get_lane_offset( int8_t lane_id, int8_t warp_id, typename T::TensorCoord const& tile_offset) { int quad = (lane_id >> 2); int lane_in_quad = (lane_id & 3); return cutlass::MatrixCoord( quad + tile_offset.row() * Shape::kRow, lane_in_quad * kElementsPerAccess + tile_offset.column() * Shape::kColumn); } template <typename FA, typename FB, typename FC> CUTLASS_DEVICE static void iterateRows( cutlass::MatrixCoord& lane_offset, FA beginRow, FB op, FC endRow) { // See cutlass/gemm/warp/mma_tensor_op_tile_iterator.h CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < kAccumulatorRows; ++row) { int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + row * kRowsPerTile + lane_offset.row(); beginRow(accum_m); CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { int mma_accum_start = kAccumulatorRows * kElementsPerAccess * (mma_n * Policy::MmaIterations::kRow + mma_m); CUTLASS_PRAGMA_UNROLL for (int col = 0; col < kElementsPerAccess; ++col) { int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col + lane_offset.column(); int idx = mma_accum_start + row * kElementsPerAccess + col; op(accum_m, accum_n, idx); } } endRow(accum_m); } } } template <typename DT, typename F> CUTLASS_DEVICE static bool reduceSameRow(int lane_id, DT& myValue, F fn) { // In each warp, 4 threads will work on the same row // - the ones with the same `quad` auto otherV = __shfl_xor_sync(0xffffffff, myValue, 1); myValue = fn(myValue, otherV); otherV = __shfl_xor_sync(0xffffffff, myValue, 2); myValue = fn(myValue, otherV); int lane_in_quad = (lane_id & 3); return lane_in_quad == 0; } }; template <typename T, typename accum_t, int kWarpSize> struct AccumLambdaIteratorSm70 { static_assert( cutlass::platform:: is_same<typename T::Layout, cutlass::layout::RowMajor>::value, "only RowMajor is supported"); using Policy = typename T::Policy; using InstructionShape = typename T::InstructionShape; using OpDelta = typename T::OpDelta; using Shape = typename T::Shape; using Element = accum_t; static int const kElementsPerPartial = 4; using EleShapePerPatial = typename cutlass::platform::conditional< cutlass::platform::is_same<Element, float>::value, cutlass::MatrixShape<2, 2>, cutlass::MatrixShape<1, 4>>::type; static int const kElementsPerMma = 8; static int const kAccumulatorPatials = 2; using QuadShapePerPatialMma = cutlass::MatrixShape<4, 4>; static cutlass::MatrixCoord CUTLASS_DEVICE get_lane_offset( int8_t lane_id, int8_t warp_id, typename T::TensorCoord const& tile_offset) { int quad = (lane_id >> 2); int lane_in_quad = (lane_id & 3); int accum_m, accum_n; if (cutlass::platform::is_same<Element, float>::value) { // (quad[2],quad[0])+lane_in_quad[0] accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + (lane_in_quad & 1); // (quad[1])+lane_in_quad[1] accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * 
kAccumulatorPatials + (lane_in_quad & 2); } else { accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + lane_in_quad; // (quad[2],quad[0]) accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials; } return cutlass::MatrixCoord( accum_m + tile_offset.row() * Shape::kRow, accum_n + tile_offset.column() * Shape::kColumn); } template <typename DT, typename F> CUTLASS_DEVICE static bool reduceSameRow(int lane_id, DT& myValue, F fn) { static_assert( cutlass::platform::is_same<Element, float>::value, "update to support non-float accum"); // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-fragment-mma-884-f16 // T0 & T2 share same line within a quad auto otherV = __shfl_xor_sync(0xffffffff, myValue, 1 << 1); myValue = fn(myValue, otherV); // quad 0 and quad 2 are on the same lines otherV = __shfl_xor_sync(0xffffffff, myValue, 1 << 3); myValue = fn(myValue, otherV); return (lane_id & ((1 << 1) | (1 << 3))) == 0; } template <typename FA, typename FB, typename FC> CUTLASS_DEVICE static void iterateRows( cutlass::MatrixCoord& lane_offset, FA beginRow, FB op, FC endRow) { CUTLASS_PRAGMA_UNROLL for (int tile_m = 0; tile_m < Policy::TileIterations::kRow; ++tile_m) { CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < EleShapePerPatial::kRow; ++m) { int accum_m = tile_m * Policy::InterleavedTile::kRow + mma_m * QuadShapePerPatialMma::kRow + m * 2 + lane_offset.row(); beginRow(accum_m); CUTLASS_PRAGMA_UNROLL for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn; ++tile_n) { CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { CUTLASS_PRAGMA_UNROLL for (int p = 0; p < kAccumulatorPatials; ++p) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < EleShapePerPatial::kColumn; ++n) { int mma_accum_start = (((tile_n * Policy::TileIterations::kRow + tile_m) * Policy::MmaIterations::kColumn + mma_n) * Policy::MmaIterations::kRow + mma_m) * kElementsPerMma; int accum_n = tile_n * Policy::InterleavedTile::kColumn + mma_n * QuadShapePerPatialMma::kColumn + p * Policy::InterleavedTile::kColumn / 2 + n + lane_offset.column(); int idx = mma_accum_start + p * kElementsPerPartial + m * EleShapePerPatial::kColumn + n; op(accum_m, accum_n, idx); } } } } endRow(accum_m); } } } } }; template <typename T, typename accum_t, int kWarpSize> struct AccumLambdaIteratorSimt { using Policy = typename T::Policy; using Iterations = typename T::Iterations; using Element = typename T::Element; using Delta = typename T::Delta; using Shape = typename T::Shape; static_assert( cutlass::platform:: is_same<typename T::Layout, cutlass::layout::RowMajor>::value, "only RowMajor is supported"); template <typename DT, typename F> CUTLASS_DEVICE static bool reduceSameRow(int lane_id, DT& myValue, F fn) { CUTLASS_PRAGMA_UNROLL for (int bit = 1; bit < Policy::WarpShape::kColumn; bit *= 2) { auto otherV = __shfl_xor_sync(0xffffffff, myValue, bit); myValue = fn(myValue, otherV); } return (lane_id & (Policy::WarpShape::kColumn - 1)) == 0; } template <typename FA, typename FB, typename FC> CUTLASS_DEVICE static void iterateRows( cutlass::MatrixCoord& lane_offset, FA beginRow, FB op, FC endRow) { CUTLASS_PRAGMA_UNROLL for (int mma_m = 0; mma_m < Iterations::kRow; ++mma_m) { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < Policy::LaneMmaShape::kM; ++m) { int accum_m = mma_m * Delta::kRow + m + lane_offset.row(); beginRow(accum_m); CUTLASS_PRAGMA_UNROLL for (int mma_n = 0; mma_n < 
Iterations::kColumn; ++mma_n) { int accum_n = mma_n * Policy::WarpShape::kColumn * Policy::LaneMmaShape::kN + lane_offset.column(); CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Policy::LaneMmaShape::kN; ++n) { int idx = n + Policy::LaneMmaShape::kN * (mma_n + Iterations::kColumn * (m + mma_m * Policy::LaneMmaShape::kM)); op(accum_m, accum_n + n, idx); } } endRow(accum_m); } } } static cutlass::MatrixCoord CUTLASS_DEVICE get_lane_offset( int8_t lane_id, int8_t warp_id, typename T::TensorCoord const& tile_offset) { static_assert( cutlass::platform::is_same< typename Policy::LaneLayout, cutlass::layout::RowMajorInterleaved<1>>::value, ""); typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); cutlass::MatrixCoord lane_offset = lane_layout.inverse(lane_id) * cutlass::MatrixCoord(Policy::LaneMmaShape::kM, Policy::LaneMmaShape::kN); return lane_offset + tile_offset * cutlass::MatrixCoord(Shape::kRow, Shape::kColumn); } }; template <typename T, typename accum_t, int kWarpSize> struct DefaultMmaAccumLambdaIterator; // Simt template <typename S, typename P, typename accum_t, int kWarpSize> struct DefaultMmaAccumLambdaIterator< cutlass::gemm::warp::MmaSimtTileIterator< S, cutlass::gemm::Operand::kC, accum_t, cutlass::layout::RowMajor, P, 1, 1>, accum_t, kWarpSize> { using WarpIterator = typename cutlass::gemm::warp::MmaSimtTileIterator< S, cutlass::gemm::Operand::kC, accum_t, cutlass::layout::RowMajor, P, 1, 1>; using Iterator = AccumLambdaIteratorSimt<WarpIterator, accum_t, kWarpSize>; }; // TensorOp - Volta template <typename S1, typename S2, typename accum_t, int kWarpSize> struct DefaultMmaAccumLambdaIterator< cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator< S1, accum_t, cutlass::layout::RowMajor, S2, cutlass::MatrixShape<1, 1>>, accum_t, kWarpSize> { using WarpIterator = typename cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator< S1, accum_t, cutlass::layout::RowMajor, S2, cutlass::MatrixShape<1, 1>>; using Iterator = AccumLambdaIteratorSm70<WarpIterator, accum_t, kWarpSize>; }; // TensorOp - Sm75+ template < typename S1, typename S2, typename S3, typename accum_t, int kWarpSize> struct DefaultMmaAccumLambdaIterator< cutlass::gemm::warp::MmaTensorOpAccumulatorTileIterator< S1, accum_t, cutlass::layout::RowMajor, S2, S3>, accum_t, kWarpSize> { using WarpIterator = typename cutlass::gemm::warp::MmaTensorOpAccumulatorTileIterator< S1, accum_t, cutlass::layout::RowMajor, S2, S3>; using Iterator = AccumLambdaIteratorSm80<WarpIterator, accum_t, kWarpSize>; };
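
//////////////////////////////////////////////////////////////////////////////
// Usage sketch
//////////////////////////////////////////////////////////////////////////////

// Illustrative sketch (a hypothetical helper, not part of the iterators above): a warp-level
// row-wise reduction over an accumulator fragment, written against the interface the
// AccumLambdaIterator* classes expose -- get_lane_offset(), iterateRows(), and reduceSameRow().
// `WarpIteratorC` is assumed to be the warp-level accumulator tile iterator of the MMA in use
// (the same type DefaultMmaAccumLambdaIterator is specialized on), `accum` its accumulator
// fragment, and `row_sum` a caller-provided buffer with one slot per accumulator row of the warp
// tile. A warp size of 32 is assumed.
template <typename WarpIteratorC, typename accum_t, typename FragmentC>
CUTLASS_DEVICE void warp_rowwise_sum(
    FragmentC const& accum,
    accum_t* row_sum,
    int8_t lane_id,
    int8_t warp_id) {
  using Iterator =
      typename DefaultMmaAccumLambdaIterator<WarpIteratorC, accum_t, 32>::Iterator;

  // Map this lane to its (row, column) origin within the warp's accumulator tile.
  cutlass::MatrixCoord lane_offset = Iterator::get_lane_offset(lane_id, warp_id, {0, 0});

  accum_t partial;
  Iterator::iterateRows(
      lane_offset,
      // Invoked when a new accumulator row begins for this lane.
      [&](int accum_m) { partial = accum_t(0); },
      // Invoked once per accumulator element: accum[idx] corresponds to (accum_m, accum_n).
      [&](int accum_m, int accum_n, int idx) { partial += accum_t(accum[idx]); },
      // Invoked when the row ends: fold the partial sums of all lanes sharing this row.
      // reduceSameRow() returns true on exactly one lane per row, which writes the result.
      [&](int accum_m) {
        if (Iterator::reduceSameRow(lane_id, partial, cutlass::plus<accum_t>())) {
          row_sum[accum_m] = partial;
        }
      });
}

// Typical call site (hypothetical), after an MMA mainloop has produced `accum_fragment`:
//   warp_rowwise_sum<typename Mma::Operator::IteratorC, float>(
//       accum_fragment, shared_row_sums, lane_id, warp_id);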
cutlass/examples/41_fused_multi_head_attention/gemm/mma_accum_lambda_iterator.h/0
{ "file_path": "cutlass/examples/41_fused_multi_head_attention/gemm/mma_accum_lambda_iterator.h", "repo_id": "cutlass", "token_count": 6214 }
9
################################################################################################# # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# import helper import gen_ir as ir class gen_test: def __init__(self, fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"): self.fuse_gemm_info = fuse_gemm_info self.gen_class_name = gen_class_name self.user_header_file = user_header_file self.sample_dir = output_dir self.b2b_num = len(fuse_gemm_info) def gen_cpp_sample(self): code = "/* Auto Generated code - Do not edit.*/\n" code += "#include <stdio.h> \n" code += "#include \"cutlass/gemm/device/gemm_batched.h\" \n" code += "#include \"cutlass/cutlass.h\" \n" code += "#include \"../cutlass_irrelevant.h\" \n" code += "#include \"../cutlass_verify.h\" \n" code += "#include \"leaky_bias.h\" \n" code += "#include \"utils.h\" \n" code += "int main(int args, char * argv[]) {\n" code += " " + "int M = atoi(argv[1]);\n" code += " " + "int K0 = " + str(self.fuse_gemm_info[0]['mnk'][0]) + ";\n" code += " " + "if(args == 3);\n" code += " " + " " + "K0 = atoi(argv[2]);\n" code += " " + "int B = 1;\n" code += " " + "if(args == 4);\n" code += " " + " " + "B = atoi(argv[3]);\n" code += " " + "srand(1234UL);\n" code += " " + "int device_id = 0;\n" code += " " + "cudaGetDevice(&device_id);\n" code += " " + "cudaDeviceProp prop;\n" code += " " + "cudaGetDeviceProperties(&prop, device_id);\n" code += " " + "int sm = prop.major *10 + prop.minor;\n" code += "using ElementCompute = cutlass::half_t;\n" for i in range(self.b2b_num): code += " " + helper.var_idx("ElementCompute alpha", i) + " = ElementCompute(1);\n" addbias = helper.get_epilogue_add_bias_or_not( self.fuse_gemm_info[i]) if addbias: code += " " + helper.var_idx("ElementCompute beta", i) + " = ElementCompute(1);\n" else: code += " " + helper.var_idx("ElementCompute beta", i) + " = ElementCompute(0);\n" code += " " + 
"size_t flops = 0;\n" for i in range(self.b2b_num): m = self.fuse_gemm_info[i]['mnk'][0] n = self.fuse_gemm_info[i]['mnk'][1] k = self.fuse_gemm_info[i]['mnk'][2] bias_shape = helper.get_epilogue_bias_shape(self.fuse_gemm_info[i]) this_k = "K0" if (i > 0): this_k = str(k) code += " " + "flops += size_t(2) * size_t(M) * size_t(B) * " + "size_t(" + str(n) + ") * size_t(" + this_k + ");\n" code += " " + helper.var_idx("cutlass::gemm::GemmCoord problem_size_", i) + "(" + "M" + ", " + str(n) + ", " + this_k + ");\n" code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_A", i) + helper.var_idx("(B * problem_size_", i) + helper.var_idx(".m() * problem_size_", i) + ".k());\n" code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_B", i) + helper.var_idx("(B * problem_size_", i) + helper.var_idx(".n() * problem_size_", i) + ".k());\n" code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_C", i) + "(B * " + str(bias_shape[0]) + " * " + str(bias_shape[1]) + ");\n" code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_D_cutlass_ref", i) + helper.var_idx("(B * problem_size_", i) + helper.var_idx(".m() * problem_size_", i) + ".n());\n" code += " " + helper.var_idx("Mat_A", i) + ".init();\n" code += " " + helper.var_idx("Mat_B", i) + ".init();\n" code += " " + helper.var_idx("Mat_C", i) + ".init();\n" code += " " + helper.var_idx("memory_unit<cutlass::half_t> Mat_D", self.b2b_num - 1) + helper.var_idx("(B * problem_size_", i) + helper.var_idx(".m() * problem_size_",self.b2b_num - 1) + ".n());\n" params = [] params.append("M") params.append("B") params.append("Mat_A0.device_ptr") for i in range(self.b2b_num): params.append(helper.var_idx("Mat_B", i) + ".device_ptr") params.append(helper.var_idx("Mat_C", i) + ".device_ptr") if i != self.b2b_num-1: params.append(helper.var_idx("Mat_D_cutlass_ref", i) + ".device_ptr") params.append(helper.var_idx("Mat_D", self.b2b_num - 1) + ".device_ptr") code += " " + "Param arguments = {\n" code += " " + " " + "M,\n" code += " " + " " + "K0,\n" code += " " + " " + "B,\n" code += " " + " " + "reinterpret_cast<const void*>(Mat_A0.device_ptr),\n" cnt = 1 for i in range(self.b2b_num): bias_flag = helper.get_epilogue_add_bias_or_not( self.fuse_gemm_info[i]) code += " " + " " + "reinterpret_cast<const void*>(" + helper.var_idx("Mat_B", i) + ".device_ptr" + "),\n" cnt += 1 if bias_flag: code += " " + " " + "reinterpret_cast<const void*>(" + helper.var_idx("Mat_C", i) + ".device_ptr" + "),\n" cnt += 1 else: code += " " + " " + "reinterpret_cast<const void*>(NULL),\n" epilogue_args = helper.get_epilogue_args(self.fuse_gemm_info[i]) acc_tp = helper.get_epilogue_compute_tp(self.fuse_gemm_info[i]) for arg in epilogue_args: arg_value = str(arg[2]) code += " " + " " + helper.type_2_cutlass_type(acc_tp) + "(" + arg_value + "),\n" if i != self.b2b_num - 1: code += " " + " " + "reinterpret_cast<void*>(" + helper.var_idx("Mat_D_cutlass_ref", i) + ".device_ptr" + "),\n" else: code += " " + " " + "reinterpret_cast<void*>(" + helper.var_idx("Mat_D", i) + ".device_ptr" + ")};\n" code += " " + "TI(FUSED_CUTLASS);\n" code += " " + "for(int i = 0; i < 100; i++){\n" code += " " + " " + "one_api(arguments, sm, NULL);\n" code += " " + "}\n" code += " " + "TO(FUSED_CUTLASS, \"FUSED_CUTLASS\", 100);\n" code += "\n" for i in range(self.b2b_num): code_this = "" N_str = str(self.fuse_gemm_info[i]['mnk'][1]) code_this += " " + helper.var_idx("typename Gemm", i) + helper.var_idx("::Arguments arguments_", i) + "{\n" code_this += " " + " " + helper.var_idx("problem_size_", 
i) + ",\n" ldmA = str(self.fuse_gemm_info[i]['mnk'][2]) if i == 0: ldmA = "K0" ldmB = str(self.fuse_gemm_info[i]['mnk'][2]) if i == 0: ldmB = "K0" ldmC = str(self.fuse_gemm_info[i]['mnk'][1]) ldmBias = str(helper.get_epilogue_bias_ldm(self.fuse_gemm_info[i])) if self.fuse_gemm_info[i]['A_format'] is 'Col': ldmA = "M" if self.fuse_gemm_info[i]['B_format'] is 'Row': ldmB = str(self.fuse_gemm_info[i]['mnk'][1]) if self.fuse_gemm_info[i]['C_format'] is 'Col': ldmC = "M" if i == 0: code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("Mat_A", i) + ".device_ptr), " + ldmA + "}, " + "M * " + ldmA + ",\n" else: code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['A_tp']) + "*>(" + helper.var_idx("Mat_D_cutlass_ref", i - 1) + ".device_ptr), " + ldmA + "}, " + "M * " + ldmA + ",\n" code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['B_tp']) + "*>(" + helper.var_idx("Mat_B", i) + ".device_ptr), " + ldmB + "}, " + N_str + " * " + ldmB + ",\n" M_bias = str(helper.get_epilogue_bias_shape(self.fuse_gemm_info[i])[0]) code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("Mat_C", i) + ".device_ptr), " + ldmBias + "}, " + M_bias + " * " + N_str + ",\n" code_this += " " + " " + "{reinterpret_cast<" + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['C_tp']) + "*>(" + helper.var_idx("Mat_D_cutlass_ref", i) + ".device_ptr), " + ldmC + "}, " + "M * " + ldmC + ",\n" code_this += " " + " " + "{ " + helper.var_idx("alpha", i) + ", " + helper.var_idx("beta", i) for epilogue_arg in helper.get_epilogue_args(self.fuse_gemm_info[i]): arg_value = str(epilogue_arg[2]) code_this += ", " + helper.type_2_cutlass_type(self.fuse_gemm_info[i]['Acc_tp']) + "(" + str(arg_value) + ")" code_this += " " + " },\n" code_this += " " + " " + "B};\n" code += code_this code += " " + "TI(UNFUSED_CUTLASS);\n" code += " " + "for(int i = 0; i < 100; i++){\n" code += " " + " " + self.gen_class_name + "_verify(\n" for i in range(self.b2b_num): code += " " + " " + " " + helper.var_idx("arguments_", i) + ",\n" code += " " + " " + " " + "NULL);\n" code += " " + "}\n" code += " " + "TO(UNFUSED_CUTLASS, \"UNFUSED_CUTLASS\", 100);\n" code += " " + helper.var_idx("Mat_D_cutlass_ref", self.b2b_num - 1) + ".d2h();\n" code += " " + helper.var_idx("Mat_D", self.b2b_num - 1) + ".d2h();\n" code += " " + helper.var_idx("check_result(Mat_D_cutlass_ref", self.b2b_num - 1) + helper.var_idx(".host_ptr, Mat_D", self.b2b_num - 1) \ + helper.var_idx(".host_ptr, Mat_D", self.b2b_num - 1) + ".elements);\n" code += "\n\n}\n" with open(self.sample_dir + "sample.cu", "w+") as f: f.write(code)
cutlass/examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_sample.py/0
{ "file_path": "cutlass/examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_sample.py", "repo_id": "cutlass", "token_count": 5946 }
10
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing linear combination operations used by epilogues. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" #include "cutlass/epilogue/thread/scale_type.h" #include "cutlass/epilogue/thread/linear_combination_params.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator to an array of elements. /// /// D = alpha * accumulator + beta * source + uniform /// template < typename ElementOutput_, ///< Data type used to load and store tensors int Count, ///< Number of elements computed per operation. 
                                       ///< Usually it is 128/sizeof_bits<ElementOutput_>,
                                       ///< but we use 64 or 32 sometimes when there is not enough data to store
  typename ElementAccumulator_ = ElementOutput_,  ///< Accumulator data type
  typename ElementCompute_ = ElementOutput_,      ///< Data type used to compute linear combination
  FloatRoundStyle Round = FloatRoundStyle::round_to_nearest
>
class LeftSiLUAndMul {
public:

  using ElementOutput = ElementOutput_;
  using ElementAccumulator = ElementAccumulator_;
  using ElementCompute = ElementCompute_;

  static int const kCount = Count;

  using FragmentOutput = Array<ElementOutput, kCount>;
  using FragmentAccumulator = Array<ElementAccumulator, kCount>;
  using ComputeFragment = Array<ElementCompute, kCount>;

  static FloatRoundStyle const kRound = Round;

  struct Params{};

private:

  //
  // Data members
  //

  ElementCompute alpha_;
  ElementCompute beta_;

public:

  /// Constructs the function object, possibly loading from pointers in host memory
  CUTLASS_HOST_DEVICE
  LeftSiLUAndMul(Params const &/*params*/) {}

  /// Returns true if source is needed
  CUTLASS_HOST_DEVICE
  bool is_source_needed() const {
    return true;
  }

  /// Functionally required for serial reduction in the epilogue
  CUTLASS_HOST_DEVICE
  void set_k_partition(int k_partition, int k_partition_count) {
    assert(false);
  }

  /// Computes D = SiLU(lhs) * rhs elementwise on the two accumulator fragments
  CUTLASS_HOST_DEVICE
  FragmentOutput operator()(
    FragmentAccumulator const &lhs,
    FragmentAccumulator const &rhs) const {

    // Convert source to internal compute numeric type
    NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_to_compute;

    // Convert to destination numeric type
    NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> compute_to_output;

    ComputeFragment converted_lhs = accumulator_to_compute(lhs);
    ComputeFragment converted_rhs = accumulator_to_compute(rhs);

    cutlass::epilogue::thread::SiLu<ComputeFragment> silu;
    cutlass::multiplies<ComputeFragment> mul;
    auto silu_lhs = silu(converted_lhs);
    return compute_to_output(mul(silu_lhs, converted_rhs));
  }

  CUTLASS_HOST_DEVICE
  ElementOutput operator()(
    ElementAccumulator const& lhs,
    ElementAccumulator const& rhs
  ) const {
    ElementCompute convert_lhs(lhs);
    ElementCompute convert_rhs(rhs);
    cutlass::epilogue::thread::SiLu<ElementCompute> silu;
    cutlass::multiplies<ElementCompute> mul;
    auto silu_lhs = silu(convert_lhs);
    return ElementOutput(mul(silu_lhs, convert_rhs));
  }
};

/////////////////////////////////////////////////////////////////////////////////////////////////

} // namespace thread
} // namespace epilogue
} // namespace cutlass

/////////////////////////////////////////////////////////////////////////////////////////////////
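// ---------------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header). LeftSiLUAndMul combines the two
// accumulator fragments of a dual GEMM as SiLU(lhs) * rhs, elementwise. The fragment width and
// element types below are arbitrary assumptions chosen for the example.
//
//   using Op = cutlass::epilogue::thread::LeftSiLUAndMul<cutlass::half_t, /*Count=*/8, float, float>;
//   Op op(Op::Params{});
//   cutlass::Array<float, 8> lhs, rhs;                      // accumulator fragments of the two GEMMs
//   lhs.fill(1.0f);
//   rhs.fill(2.0f);
//   cutlass::Array<cutlass::half_t, 8> out = op(lhs, rhs);  // out[i] == half_t(silu(1.0f) * 2.0f)
// ---------------------------------------------------------------------------------------------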
cutlass/examples/45_dual_gemm/thread/left_silu_and_mul.h/0
{ "file_path": "cutlass/examples/45_dual_gemm/thread/left_silu_and_mul.h", "repo_id": "cutlass", "token_count": 1804 }
11
This example shows how to perform mixed-type GEMMs in CUTLASS.

## High level overview

This example shows how to perform GEMMs on Hopper when A and B have different types. The implementation always passes the type with fewer bits through the register file and upcasts it to the type with the higher bit count.

When relying on `KernelScheduleAuto`, the main loop supporting different A and B types is selected whenever the bit count of A is not equal to the bit count of B. Users can manually select the mixed-type main loop and explicitly choose the scheduling policy by specifying one of the following schedules to the `CollectiveBuilder`: `KernelTmaWarpSpecializedMixedInput`, `KernelTmaWarpSpecializedPingpongMixedInput` or `KernelTmaWarpSpecializedCooperativeMixedInput` (see the sketch at the end of this README).

This first version only supports mixed-type GEMMs using TMA.

## Performance

While the example offers a harness for straightforward benchmarking, this initial implementation is not optimized for performance in the majority of scenarios. We expect this implementation to be performant for `{fp16, bf16} x {int8, int4}` and `{fp8} x {int4}` for problems that are compute bound. Additionally, we expect good performance for `fp16`, `bf16` or `fp32` scales and zero-points. For best performance, the scales and zero-points should have the same type.

We are currently optimizing the following cases:

1. Memory bound cases for all types

## Limitations

* The type that needs to be converted must go through the register file. This means that the collective will swap and transpose whenever the type with fewer bits is the B operand. The user must be aware of when these swaps happen. Note that TMA epilogues currently do not support *implicit* swap + transpose, so non-TMA epilogues must be used in this case. We plan to relax this limitation in a future release.

* The layout of the narrow type must be K-major. This means the following:
  * Narrow type is the A operand: must be Row-Major
  * Narrow type is the B operand: must be Column-Major
  * For 8-bit x 4-bit or 2-bit, both inputs must be K-major.

* TMA requires an alignment of 128 bits. As a result, for a type with `B` bits, `B x TILE_K` must be a multiple of 128 bits.

* The scale and zero-point types must be two bytes or more.

* The group size must be equal to the gemm-k size (indicating a broadcast), or it must be a multiple of the threadblock-k size.

## Upcoming features

* Optimizations for memory bound cases.
* Optimizations for scale and zero-point loading when the group size is not equal to the threadblock-k size.
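## Example: selecting a mixed-input schedule (sketch)

The snippet below is a minimal, illustrative sketch of how one of the mixed-input schedules listed above might be passed to the `CollectiveBuilder`. The element types, layouts, alignments, and tile/cluster shapes are placeholder assumptions and are not taken verbatim from this example; consult the example's source for the exact builder invocation.

```c++
// Assumed operand configuration: int8 (narrow, K-major A) x fp16 (wide, K-major B).
using ElementA = int8_t;                 // narrow operand, Row-Major (K-major) as required above
using ElementB = cutlass::half_t;        // wide operand, Column-Major (K-major)
using ElementAccumulator = float;

using TileShape    = cute::Shape<cute::_128, cute::_128, cute::_64>;   // assumed CTA tile
using ClusterShape = cute::Shape<cute::_1, cute::_1, cute::_1>;        // assumed cluster

using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder<
    cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
    ElementA, cutlass::layout::RowMajor,    16,  // 128-bit alignment for int8
    ElementB, cutlass::layout::ColumnMajor,  8,  // 128-bit alignment for fp16
    ElementAccumulator,
    TileShape, ClusterShape,
    cutlass::gemm::collective::StageCountAuto,
    cutlass::gemm::KernelTmaWarpSpecializedCooperativeMixedInput  // explicit mixed-input schedule
  >::CollectiveOp;
```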
cutlass/examples/55_hopper_mixed_dtype_gemm/README.md/0
{ "file_path": "cutlass/examples/55_hopper_mixed_dtype_gemm/README.md", "repo_id": "cutlass", "token_count": 665 }
12
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cuda_runtime.h" #include <iostream> /** * Panic wrapper for unwinding CUTLASS errors */ #define CUTLASS_CHECK(status) \ { \ cutlass::Status error = status; \ if (error != cutlass::Status::kSuccess) { \ std::cerr << "Got cutlass error: " << cutlassGetStatusString(error) << " at: " << __LINE__ \ << std::endl; \ exit(EXIT_FAILURE); \ } \ } /** * Panic wrapper for unwinding CUDA runtime errors */ #define CUDA_CHECK(status) \ { \ cudaError_t error = status; \ if (error != cudaSuccess) { \ std::cerr << "Got bad cuda status: " << cudaGetErrorString(error) \ << " at line: " << __LINE__ << std::endl; \ exit(EXIT_FAILURE); \ } \ } /** * GPU timer for recording the elapsed time across kernel(s) launched in GPU stream */ struct GpuTimer { cudaStream_t _stream_id; cudaEvent_t _start; cudaEvent_t _stop; /// Constructor GpuTimer() : _stream_id(0) { CUDA_CHECK(cudaEventCreate(&_start)); CUDA_CHECK(cudaEventCreate(&_stop)); } /// Destructor ~GpuTimer() { CUDA_CHECK(cudaEventDestroy(_start)); CUDA_CHECK(cudaEventDestroy(_stop)); } /// Start the timer for a given stream (defaults to the default stream) void start(cudaStream_t stream_id = 0) { _stream_id = stream_id; CUDA_CHECK(cudaEventRecord(_start, _stream_id)); } /// Stop the timer void stop() { CUDA_CHECK(cudaEventRecord(_stop, _stream_id)); } /// Return the elapsed time (in milliseconds) float elapsed_millis() { float elapsed = 0.0; CUDA_CHECK(cudaEventSynchronize(_stop)); CUDA_CHECK(cudaEventElapsedTime(&elapsed, _start, _stop)); return elapsed; } };
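// ---------------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header). `my_kernel`, `grid`, `block`, and
// `args` are hypothetical; everything else uses only the GpuTimer interface defined above.
//
//   GpuTimer timer;
//   timer.start();                        // records the start event on the default stream
//   my_kernel<<<grid, block>>>(args);     // hypothetical kernel launch being timed
//   timer.stop();                         // records the stop event
//   float ms = timer.elapsed_millis();    // synchronizes on the stop event, returns milliseconds
//   printf("kernel took %f ms\n", ms);
// ---------------------------------------------------------------------------------------------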
cutlass/examples/common/helper.h/0
{ "file_path": "cutlass/examples/common/helper.h", "repo_id": "cutlass", "token_count": 2059 }
13
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/atom/copy_atom.hpp> #include <cute/algorithm/copy.hpp> #include <cute/tensor.hpp> #include <cute/tensor_predicate.hpp> namespace cute { // cooperative_copy<NumThreads, MaxVecBits>(thr_idx, src, dst) // Use NumThreads to copy src to dst with element vectorization up to MaxVecBits. // @pre 0 <= @a tid < NumThreads // @pre Tensors @a src and @a dst are aligned up to MaxVecBits. 
// template <uint32_t NumThreads, uint32_t MaxVecBits, class SrcEngine, class SrcLayout, class DstEngine, class DstLayout> CUTE_HOST_DEVICE void cooperative_copy(uint32_t const& tid, Tensor<SrcEngine, SrcLayout> const& src, Tensor<DstEngine, DstLayout> & dst) { // Assumes the shapes are static, can generalize CUTE_STATIC_ASSERT_V(size(src) == size(dst)); // Assumes the types are the same, can generalize static_assert(sizeof_bits_v<typename SrcEngine::value_type> == sizeof_bits_v<typename DstEngine::value_type>); static_assert(MaxVecBits == sizeof_bits_v<typename SrcEngine::value_type> || MaxVecBits == 8 || MaxVecBits == 16 || MaxVecBits == 32 || MaxVecBits == 64 || MaxVecBits == 128, "Expected MaxVecBits to be value size or 8 or 16 or 32 or 64 or 128 for alignment and performance."); // Check that the tensors are likely shared across threads: either gmem or smem static_assert((is_gmem<SrcEngine>::value || is_smem<SrcEngine>::value), "cooperative_copy expects shared gmem or smem source tensor."); static_assert((is_gmem<DstEngine>::value || is_smem<DstEngine>::value), "cooperative_copy expects shared gmem or smem destination tensor."); // Precondition on tid in DEBUG assert(tid < NumThreads); // Fallback - slow path, naive copy, vectorization disabled if constexpr(size(SrcLayout{}) % NumThreads != 0) { int index = static_cast<int>(tid); CUTE_UNROLL for(int i = 0; i < ceil_div(size(SrcLayout{}), NumThreads); i++) { if(index < size(SrcLayout{})) { dst[index] = src[index]; } index += NumThreads; } } else { // Fast path with vectorization // Precondition on pointer alignment in DEBUG assert(is_byte_aligned<max(MaxVecBits/8, 1u)>(raw_pointer_cast(src.data()))); assert(is_byte_aligned<max(MaxVecBits/8, 1u)>(raw_pointer_cast(dst.data()))); constexpr int elem_bits = sizeof_bits_v<typename SrcEngine::value_type>; // // Determine val+thr vectorization based on src/dst size and number of threads // NOTE: This heuristic promotes parallelization over vectorization // // The number of elements that can be vectorized in values constexpr int common_elem = decltype(max_common_vector(src, dst))::value; constexpr int common_bits = common_elem * elem_bits; constexpr int total_elem = decltype(size(src))::value; constexpr int total_bits = total_elem * elem_bits; static_assert(total_bits % NumThreads == 0); constexpr int total_bits_per_thr = total_bits / NumThreads; // If there are too many threads to allow a full elem copy, trunc the thrs and use elem_bits constexpr int max_vec_bits_by_thr = cute::max(elem_bits, total_bits_per_thr); // Cap the vectorization to the common bits, the max_vec_bits_by_thr, and the MaxVecBits constexpr int vec_bits = cute::min(common_bits, max_vec_bits_by_thr, static_cast<int>(MaxVecBits)); // Convert back to number of elements, safe_div static_assert((vec_bits % elem_bits) == 0); constexpr int vec_elem = vec_bits / elem_bits; // Use only part of threads if there's not enough work for all threads constexpr int vec_thrs = (total_elem % (vec_elem * NumThreads) == 0) ? 
NumThreads : (total_elem / vec_elem); static_assert(vec_thrs <= NumThreads); // The common layout of the two tensors that can be vectorized over threads // vidx -> coord auto common_layout = max_common_layout(get_nonswizzle_portion(src.layout()), get_nonswizzle_portion(dst.layout())); // Scale up the common_layout to cover the entire tensors // vidx -> coord auto full_perm = tile_to_shape(make_layout(common_layout), size(src)); // Create the Tiler // ((vid,tid),iter) auto layout_vt = logical_divide(full_perm, Layout<Shape<Int<vec_elem>, Int<vec_thrs>>>{}); // Apply and slice Tensor src_v = src.compose(layout_vt)(make_coord(_,tid),_); Tensor dst_v = dst.compose(layout_vt)(make_coord(_,tid),_); // Should account for vec_bits < 8 and/or vec_elem <= 1 // And also account for subbyte types, which could cause race conditions // Want to ENFORCE sufficient vectorization in those cases static_assert((vec_bits >= 8), "No support for subbyte copying"); using VecType = uint_bit_t<vec_bits>; #if 0 if (thread0()) { print(" "); print("cooperative_copy -- vec\n"); print(" "); print("NumThreads: "); print(NumThreads); print("\n"); print(" "); print("MaxVecBits: "); print(MaxVecBits); print("\n"); print(" "); print("src: "); print(src); print("\n"); print(" "); print("dst: "); print(dst); print("\n"); print(" "); print("common_layout: "); print(common_layout); print("\n"); print(" "); print("full_perm: "); print(full_perm); print("\n"); print(" "); print("Used vector: "); print(vec_elem); print("\n"); print(" "); print("Used threads: "); print(vec_thrs); print("\n"); print(" "); print("layout_vt: "); print(layout_vt); print("\n"); print(" "); print("src.compose(layout_vt): "); print(src.compose(layout_vt)); print("\n"); print(" "); print("dst.compose(layout_vt): "); print(dst.compose(layout_vt)); print("\n"); print(" "); print("src_v: "); print(src_v); print("\n"); print(" "); print("dst_v: "); print(dst_v); print("\n"); print(" "); print("recast<VecType const>(src_v): "); print(recast<VecType const>(src_v)); print("\n"); print(" "); print("recast<VecType const>(dst_v): "); print(recast<VecType const>(dst_v)); print("\n"); } #ifdef __CUDA_ARCH__ __syncthreads(); #endif #endif // If we're using all threads (static) or the tid is in in-range (dynamic) if (vec_thrs >= NumThreads or tid < vec_thrs) { return copy_if(TrivialPredTensor{}, recast<VecType const>(src_v), recast<VecType>(dst_v)); } } } template <uint32_t NumThreads, class SrcEngine, class SrcLayout, class DstEngine, class DstLayout> CUTE_HOST_DEVICE void cooperative_copy(uint32_t const& tid, Tensor<SrcEngine, SrcLayout> const& src, Tensor<DstEngine, DstLayout> & dst) { constexpr uint32_t MaxVecBits = sizeof_bits_v<typename SrcEngine::value_type>; return cooperative_copy<NumThreads, MaxVecBits>(tid, src, dst); } // Accept mutable temporaries template <uint32_t NumThreads, class SrcEngine, class SrcLayout, class DstEngine, class DstLayout> CUTE_HOST_DEVICE void cooperative_copy(uint32_t const& tid, Tensor<SrcEngine, SrcLayout> const& src, Tensor<DstEngine, DstLayout> && dst) { return cooperative_copy<NumThreads>(tid, src, dst); } // Accept mutable temporaries template <uint32_t NumThreads, uint32_t MaxVecBits, class SrcEngine, class SrcLayout, class DstEngine, class DstLayout> CUTE_HOST_DEVICE void cooperative_copy(uint32_t const& tid, Tensor<SrcEngine, SrcLayout> const& src, Tensor<DstEngine, DstLayout> && dst) { return cooperative_copy<NumThreads, MaxVecBits>(tid, src, dst); } } // end namespace cute
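// ---------------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header): 128 threads of a CTA cooperatively
// copy a statically shaped 32x32 float tile from global to shared memory. The kernel, shapes, and
// layouts are assumptions chosen for the example; only cooperative_copy itself comes from above.
//
//   __global__ void copy_tile(float const* gptr) {          // assumes blockDim.x == 128
//     using namespace cute;
//     __shared__ float smem[32 * 32];
//     auto gmem_tensor = make_tensor(make_gmem_ptr(gptr), make_layout(Shape<_32, _32>{}));
//     auto smem_tensor = make_tensor(make_smem_ptr(smem), make_layout(Shape<_32, _32>{}));
//     cooperative_copy<128>(threadIdx.x, gmem_tensor, smem_tensor);  // vector width chosen automatically
//     __syncthreads();
//   }
// ---------------------------------------------------------------------------------------------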
cutlass/include/cute/algorithm/cooperative_copy.hpp/0
{ "file_path": "cutlass/include/cute/algorithm/cooperative_copy.hpp", "repo_id": "cutlass", "token_count": 3756 }
14
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #if !defined(__CUDACC_RTC__) #include <cuda.h> #include <cinttypes> #endif #include <cute/config.hpp> #include <cute/arch/copy.hpp> #include <cute/arch/copy_sm90.hpp> #include <cute/container/alignment.hpp> #include <cute/container/bit_field.hpp> #include <cute/container/array.hpp> #include <cute/numeric/numeric_types.hpp> namespace cute { ////////////////////////////////////////////////////////////////////////////////////////////////////// /// Barriers are 64-bit of user-managed information used in broadly two types syncronization patterns /// 1) arrive/wait on threads (usage: cp.async and warp-specialized kernels) /// 2) transaction-based (usage: TMA transaction where a CTA issues one transaction) ////////////////////////////////////////////////////////////////////////////////////////////////////// // Initialize barrier present in shared memory CUTE_HOST_DEVICE void initialize_barrier(uint64_t& smem_barrier, // 64 bits user-manged barrier in smem int thread_count = 1) // Thread count expected to arrive/wait on this barrier { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_barrier); asm volatile ("mbarrier.init.shared::cta.b64 [%0], %1;\n" :: "r"(smem_int_ptr), "r"(thread_count)); #endif } // Set the number of bytes transfered per transaction and perform an arrive operation as well CUTE_HOST_DEVICE void set_barrier_transaction_bytes(uint64_t& smem_barrier, // 64 bits user-manged barrier in smem uint32_t bytes) // Number of bytes transfered by per TMA transaction { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_barrier); asm volatile ("mbarrier.arrive.expect_tx.shared::cta.b64 _, [%0], %1;\n" :: "r"(smem_int_ptr), 
"r"(bytes)); #endif } // Barrier wait CUTE_HOST_DEVICE void wait_barrier(uint64_t& smem_barrier, // 64 bits user-manged barrier in smem int phase_bit) // Current phase bit the barrier waiting to flip { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_barrier); asm volatile( "{\n" ".reg .pred P1;\n" "LAB_WAIT:\n" "mbarrier.try_wait.parity.shared::cta.b64 P1, [%0], %1;\n" "@P1 bra.uni DONE;\n" "bra.uni LAB_WAIT;\n" "DONE:\n" "}\n" :: "r"(smem_int_ptr), "r"(phase_bit)); #endif } // Barrier arrive CUTE_HOST_DEVICE void arrive_barrier(uint64_t& smem_barrier) // 64 bits user-manged barrier in smem { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint32_t smem_int_ptr = cast_smem_ptr_to_uint(&smem_barrier); asm volatile( "{\n" ".reg .b64 state; \n" "mbarrier.arrive.shared::cta.b64 state, [%0];\n" "}\n" :: "r"(smem_int_ptr)); #endif } //////////////////////////////////////////////////////////////////////////////////////////////////// // TMA Descriptor and utilities //////////////////////////////////////////////////////////////////////////////////////////////////// namespace TMA { enum class SmemSwizzleBits : uint8_t { DISABLE = 0, B32 = 1, B64 = 2, B128 = 3, }; #if (__CUDACC_VER_MAJOR__ >= 12) #if !defined(__CUDACC_RTC__) /// @return The TMA descriptor datatype enum corresponding to T. template <class T> inline CUtensorMapDataType to_CUtensorMapDataType() { if constexpr (is_same_v<T, int8_t>) { return CU_TENSOR_MAP_DATA_TYPE_UINT8; } else if constexpr (is_same_v<T, uint8_t>) { return CU_TENSOR_MAP_DATA_TYPE_UINT8; } else if constexpr (is_same_v<T, float_e4m3_t>) { return CU_TENSOR_MAP_DATA_TYPE_UINT8; } else if constexpr (is_same_v<T, float_e5m2_t>) { return CU_TENSOR_MAP_DATA_TYPE_UINT8; } else if constexpr (is_same_v<T, uint16_t>) { return CU_TENSOR_MAP_DATA_TYPE_UINT16; } else if constexpr (is_same_v<T, uint32_t>) { return CU_TENSOR_MAP_DATA_TYPE_UINT32; } else if constexpr (is_same_v<T, uint64_t>) { return CU_TENSOR_MAP_DATA_TYPE_UINT64; } else if constexpr (is_same_v<T, int32_t>) { return CU_TENSOR_MAP_DATA_TYPE_INT32; } else if constexpr (is_same_v<T, int64_t>) { return CU_TENSOR_MAP_DATA_TYPE_INT64; } else if constexpr (is_same_v<T, half_t>) { return CU_TENSOR_MAP_DATA_TYPE_FLOAT16; } else if constexpr (is_same_v<T, float>) { return CU_TENSOR_MAP_DATA_TYPE_FLOAT32; } else if constexpr (is_same_v<T, double>) { return CU_TENSOR_MAP_DATA_TYPE_FLOAT64; } else if constexpr (is_same_v<T, bfloat16_t>) { return CU_TENSOR_MAP_DATA_TYPE_BFLOAT16; } else if constexpr (is_same_v<T, tfloat32_t>) { return CU_TENSOR_MAP_DATA_TYPE_TFLOAT32; } else { static_assert(sizeof(T) < 0, "Unknown TMA Format!"); } } inline CUtensorMapSwizzle to_CUtensorMapSwizzle(SmemSwizzleBits const& t) { switch (t) { default: assert(false && "Unknown SmemSwizzleBits!"); case SmemSwizzleBits::DISABLE: return CU_TENSOR_MAP_SWIZZLE_NONE; case SmemSwizzleBits::B32: return CU_TENSOR_MAP_SWIZZLE_32B; case SmemSwizzleBits::B64: return CU_TENSOR_MAP_SWIZZLE_64B; case SmemSwizzleBits::B128: return CU_TENSOR_MAP_SWIZZLE_128B; } } #endif // !defined(__CUDACC_RTC__) #endif // (__CUDACC_VER_MAJOR__ >= 12) } // end namespace TMA #if (__CUDACC_VER_MAJOR__ >= 12) && !defined(__CUDACC_RTC__) using TmaDescriptor = CUtensorMap; using Im2ColTmaDescriptor = CUtensorMap; #else using TmaDescriptor = struct alignas(64) { char bytes[128]; }; using Im2ColTmaDescriptor = struct alignas(64) { char bytes[128]; }; #endif //////////////////////////////////////////////////////////////////////////////////////////////////// /// 
Initiates a TensorMap Prefetch //////////////////////////////////////////////////////////////////////////////////////////////////// CUTE_HOST_DEVICE void prefetch_tma_descriptor(TmaDescriptor const* desc_ptr) { #if defined(CUTE_ARCH_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); // Prefetch TMA Descriptor using generic addressing (i.e. no specific state space: const or param) asm volatile ( "prefetch.tensormap [%0];" : : "l"(gmem_int_desc) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Trying to use TMA Descriptor Prefetch without CUTE_ARCH_TMA_SM90_ENABLED."); #endif } //////////////////////////////////////////////////////////////////////////////////////////////////// /// Perform a TensorMap modification (by each field) //////////////////////////////////////////////////////////////////////////////////////////////////// // Replace tensor pointer directly in GMEM CUTE_HOST_DEVICE void tma_descriptor_replace_addr_in_global_mem(TmaDescriptor const* desc_ptr, void const* const new_tensor_ptr) { #if defined(CUTE_ARCH_DEVICE_MODIFIABLE_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); uint64_t const new_desc_addr = reinterpret_cast<uint64_t>(new_tensor_ptr); asm volatile ( "tensormap.replace.tile.global_address.global.b1024.b64 [%0], %1;" :: "l"(gmem_int_desc), "l"(new_desc_addr)); #else CUTE_INVALID_CONTROL_PATH("Using TMA Descriptor modification without CUTE_ARCH_TMA_SM90_ENABLED and CUDA 12.3"); #endif } // Replace tensor pointer by bringing the tensormap from GMEM into the shared memory CUTE_HOST_DEVICE void tma_descriptor_replace_addr_in_shared_mem(TmaDescriptor& smem_desc, void const* const new_tensor_ptr) { #if defined(CUTE_ARCH_DEVICE_MODIFIABLE_TMA_SM90_ENABLED) uint32_t smem_int_desc = cast_smem_ptr_to_uint(&smem_desc); uint64_t const new_desc_addr = reinterpret_cast<uint64_t>(new_tensor_ptr); uint64_t const smem_int64_desc = 0; asm volatile ( "cvt.u64.u32 %0, %1;" :: "l"(smem_int64_desc), "r"(smem_int_desc)); asm volatile ( "tensormap.replace.tile.global_address.shared::cta.b1024.b64 [%0], %1;" :: "l"(smem_int64_desc), "l"(new_desc_addr)); #else CUTE_INVALID_CONTROL_PATH("Using TMA Descriptor modification without CUTE_ARCH_TMA_SM90_ENABLED and CUDA 12.3"); #endif } // Replace tensor dims and strides for GEMMs by bringing the tensormap from GMEM into the shared memory CUTE_HOST_DEVICE void tma_descriptor_replace_dims_strides_in_shared_mem(TmaDescriptor & smem_desc, cute::array<uint32_t, 3> const& prob_shape, cute::array<uint64_t, 3> const& prob_stride) { #if defined(CUTE_ARCH_DEVICE_MODIFIABLE_TMA_SM90_ENABLED) uint32_t smem_int_desc = cast_smem_ptr_to_uint(&smem_desc); uint64_t const smem_int64_desc = 0; asm volatile ( "cvt.u64.u32 %0, %1;" :: "l"(smem_int64_desc), "r"(smem_int_desc)); asm volatile ( "tensormap.replace.tile.global_dim.shared::cta.b1024.b32 [%0], 0, %1;" :: "l"(smem_int64_desc), "r"(prob_shape[0])); asm volatile ( "tensormap.replace.tile.global_dim.shared::cta.b1024.b32 [%0], 1, %1;" :: "l"(smem_int64_desc), "r"(prob_shape[1])); asm volatile ( "tensormap.replace.tile.global_dim.shared::cta.b1024.b32 [%0], 2, %1;" :: "l"(smem_int64_desc), "r"(prob_shape[2])); // Strides must be a multiple of 16. 
Also, stride for the intermost dimension is implicitly 1 asm volatile ( "tensormap.replace.tile.global_stride.shared::cta.b1024.b64 [%0], 0, %1;" :: "l"(smem_int64_desc), "l"(prob_stride[1] >> 4)); asm volatile ( "tensormap.replace.tile.global_stride.shared::cta.b1024.b64 [%0], 1, %1;" :: "l"(smem_int64_desc), "l"(prob_stride[2] >> 4)); #else CUTE_INVALID_CONTROL_PATH("Using TMA Descriptor modification without CUTE_ARCH_TMA_SM90_ENABLED and CUDA 12.3"); #endif } //////////////////////////////////////////////////////////////////////////////////////////////////// /// Perform a fused copy and fence operation (needed when modifying tensormap in shared memory) //////////////////////////////////////////////////////////////////////////////////////////////////// CUTE_HOST_DEVICE void tma_descriptor_cp_fence_release(TmaDescriptor const* gmem_desc_ptr, TmaDescriptor& smem_desc) { #if defined(CUTE_ARCH_DEVICE_MODIFIABLE_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(gmem_desc_ptr); uint32_t smem_int_desc = cast_smem_ptr_to_uint(&smem_desc); asm volatile ( "tensormap.cp_fenceproxy.global.shared::cta.tensormap::generic.release.gpu.sync.aligned [%0], [%1], 128;" :: "l"(gmem_int_desc), "r"(smem_int_desc)); #else CUTE_INVALID_CONTROL_PATH("Using TMA Descriptor modification without CUTE_ARCH_TMA_SM90_ENABLED and CUDA 12.3"); #endif } //////////////////////////////////////////////////////////////////////////////////////////////////// /// Perform a release fence operation (needed when modifying tensormap directly in GMEM) //////////////////////////////////////////////////////////////////////////////////////////////////// CUTE_HOST_DEVICE void tma_descriptor_fence_release() { #if defined(CUTE_ARCH_DEVICE_MODIFIABLE_TMA_SM90_ENABLED) asm volatile ("fence.proxy.tensormap::generic.release.gpu;"); #else CUTE_INVALID_CONTROL_PATH("Using TMA Descriptor modification without CUTE_ARCH_TMA_SM90_ENABLED and CUDA 12.3"); #endif } //////////////////////////////////////////////////////////////////////////////////////////////////// /// Perform a acquire fence operation //////////////////////////////////////////////////////////////////////////////////////////////////// CUTE_HOST_DEVICE void tma_descriptor_fence_acquire(TmaDescriptor const* desc_ptr) { #if defined(CUTE_ARCH_DEVICE_MODIFIABLE_TMA_SM90_ENABLED) uint64_t gmem_int_desc = reinterpret_cast<uint64_t>(desc_ptr); asm volatile ( "fence.proxy.tensormap::generic.acquire.gpu [%0], 128;" : : "l"(gmem_int_desc) : "memory"); asm volatile ( "cvta.global.u64 %0, %0;" : : "l"(gmem_int_desc), "l"(gmem_int_desc) : "memory"); #else CUTE_INVALID_CONTROL_PATH("Using TMA Descriptor modification without CUTE_ARCH_TMA_SM90_ENABLED and CUDA 12.3"); #endif } /////////////////////////////////////////////////////////////////////////////// } // end namespace cute
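// ---------------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header) for the mbarrier helpers defined at
// the top of this file: one thread initializes the barrier and posts the expected transaction
// byte count for a TMA load, then every thread waits for the transaction to complete. The byte
// count and the TMA copy itself are assumptions and are elided.
//
//   __shared__ uint64_t tma_barrier;
//   if (threadIdx.x == 0) {
//     cute::initialize_barrier(tma_barrier, /*thread_count=*/1);
//   }
//   __syncthreads();
//   int phase_bit = 0;                                      // first phase after initialization
//   if (threadIdx.x == 0) {
//     cute::set_barrier_transaction_bytes(tma_barrier, bytes_expected);  // bytes_expected: assumed
//     // ... issue the TMA copy that reports completion to &tma_barrier ...
//   }
//   cute::wait_barrier(tma_barrier, phase_bit);             // all threads block until the data lands
// ---------------------------------------------------------------------------------------------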
cutlass/include/cute/arch/copy_sm90_desc.hpp/0
{ "file_path": "cutlass/include/cute/arch/copy_sm90_desc.hpp", "repo_id": "cutlass", "token_count": 5497 }
15
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Statically sized array of elements that accommodates subbyte trivial types in a packed storage. */ #pragma once #include <cute/config.hpp> #include <cute/numeric/numeric_types.hpp> #include <cute/numeric/integral_constant.hpp> namespace cute { // // Underlying subbyte storage type // template <class T> using subbyte_storage_type_t = conditional_t<(cute::sizeof_bits_v<T> <= 8), uint8_t, conditional_t<(cute::sizeof_bits_v<T> <= 16), uint16_t, conditional_t<(cute::sizeof_bits_v<T> <= 32), uint32_t, conditional_t<(cute::sizeof_bits_v<T> <= 64), uint64_t, conditional_t<(cute::sizeof_bits_v<T> <= 128), uint128_t, T>>>>>; template <class T> struct subbyte_iterator; template <class, class> struct swizzle_ptr; // // subbyte_reference // Proxy object for sub-byte element references // template <class T> struct subbyte_reference { // Iterator Element type (const or non-const) using element_type = T; // Iterator Value type without type qualifier. 
using value_type = remove_cv_t<T>; // Storage type (const or non-const) using storage_type = conditional_t<(is_const_v<T>), subbyte_storage_type_t<T> const, subbyte_storage_type_t<T>>; static_assert(sizeof_bits_v<storage_type> % 8 == 0, "Storage type is not supported"); static_assert(sizeof_bits_v<element_type> <= sizeof_bits_v<storage_type>, "Size of Element must not be greater than Storage."); private: // Bitmask for covering one item static constexpr storage_type BitMask = storage_type(storage_type(-1) >> (sizeof_bits_v<storage_type> - sizeof_bits_v<element_type>)); // Flag for fast branching on straddled elements static constexpr bool is_storage_unaligned = ((sizeof_bits_v<storage_type> % sizeof_bits_v<element_type>) != 0); friend struct subbyte_iterator<T>; // Pointer to storage element storage_type* ptr_ = nullptr; // Bit index of value_type starting position within storage_type element. // RI: 0 <= idx_ < sizeof_bit<storage_type> uint8_t idx_ = 0; // Ctor template <class PointerType> CUTE_HOST_DEVICE constexpr subbyte_reference(PointerType* ptr, uint8_t idx = 0) : ptr_(reinterpret_cast<storage_type*>(ptr)), idx_(idx) {} public: // Copy Ctor CUTE_HOST_DEVICE constexpr subbyte_reference(subbyte_reference const& other) { *this = element_type(other); } // Copy Assignment CUTE_HOST_DEVICE constexpr subbyte_reference& operator=(subbyte_reference const& other) { return *this = element_type(other); } // Assignment template <class T_ = element_type> CUTE_HOST_DEVICE constexpr enable_if_t<!is_const_v<T_>, subbyte_reference&> operator=(element_type x) { static_assert(is_same_v<T_, element_type>, "Do not specify template arguments!"); storage_type item = (reinterpret_cast<storage_type const&>(x) & BitMask); // Update the current storage element storage_type bit_mask_0 = storage_type(BitMask << idx_); ptr_[0] = storage_type((ptr_[0] & ~bit_mask_0) | (item << idx_)); // If value_type is unaligned with storage_type (static) and this is a straddled value (dynamic) if (is_storage_unaligned && idx_ + sizeof_bits_v<value_type> > sizeof_bits_v<storage_type>) { uint8_t straddle_bits = uint8_t(sizeof_bits_v<storage_type> - idx_); storage_type bit_mask_1 = storage_type(BitMask >> straddle_bits); // Update the next storage element ptr_[1] = storage_type((ptr_[1] & ~bit_mask_1) | (item >> straddle_bits)); } return *this; } // Comparison of referenced values CUTE_HOST_DEVICE constexpr friend bool operator==(subbyte_reference const& x, subbyte_reference const& y) { return x.get() == y.get(); } CUTE_HOST_DEVICE constexpr friend bool operator!=(subbyte_reference const& x, subbyte_reference const& y) { return x.get() != y.get(); } CUTE_HOST_DEVICE constexpr friend bool operator< (subbyte_reference const& x, subbyte_reference const& y) { return x.get() < y.get(); } CUTE_HOST_DEVICE constexpr friend bool operator> (subbyte_reference const& x, subbyte_reference const& y) { return x.get() > y.get(); } CUTE_HOST_DEVICE constexpr friend bool operator<=(subbyte_reference const& x, subbyte_reference const& y) { return x.get() <= y.get(); } CUTE_HOST_DEVICE constexpr friend bool operator>=(subbyte_reference const& x, subbyte_reference const& y) { return x.get() >= y.get(); } // Value CUTE_HOST_DEVICE element_type get() const { if constexpr (is_same_v<bool, value_type>) { // Extract to bool -- potentially faster impl return bool((*ptr_) & (BitMask << idx_)); } else { // Extract to element_type // Extract from the current storage element auto item = storage_type((ptr_[0] >> idx_) & BitMask); // If value_type is unaligned 
with storage_type (static) and this is a straddled value (dynamic) if (is_storage_unaligned && idx_ + sizeof_bits_v<value_type> > sizeof_bits_v<storage_type>) { uint8_t straddle_bits = uint8_t(sizeof_bits_v<storage_type> - idx_); storage_type bit_mask_1 = storage_type(BitMask >> straddle_bits); // Extract from the next storage element item |= storage_type((ptr_[1] & bit_mask_1) << straddle_bits); } return reinterpret_cast<element_type&>(item); } } // Extract to type element_type CUTE_HOST_DEVICE constexpr operator element_type() const { return get(); } // Address subbyte_iterator<T> operator&() const { return {ptr_, idx_}; } }; // // subbyte_iterator // Random-access iterator over subbyte references // template <class T> struct subbyte_iterator { // Iterator Element type (const or non-const) using element_type = T; // Iterator Value type without type qualifier. using value_type = remove_cv_t<T>; // Storage type (const or non-const) using storage_type = conditional_t<(is_const_v<T>), subbyte_storage_type_t<T> const, subbyte_storage_type_t<T>>; // Reference proxy type using reference = subbyte_reference<element_type>; static_assert(sizeof_bits_v<storage_type> % 8 == 0, "Storage type is not supported"); static_assert(sizeof_bits_v<element_type> <= sizeof_bits_v<storage_type>, "Size of Element must not be greater than Storage."); private: template <class, class> friend struct swizzle_ptr; // Pointer to storage element storage_type* ptr_ = nullptr; // Bit index of value_type starting position within storage_type element. // RI: 0 <= idx_ < sizeof_bit<storage_type> uint8_t idx_ = 0; public: // Ctor subbyte_iterator() = default; // Ctor template <class PointerType> CUTE_HOST_DEVICE constexpr subbyte_iterator(PointerType* ptr, uint8_t idx = 0) : ptr_(reinterpret_cast<storage_type*>(ptr)), idx_(idx) { } CUTE_HOST_DEVICE constexpr reference operator*() const { return reference(ptr_, idx_); } CUTE_HOST_DEVICE constexpr subbyte_iterator& operator+=(uint64_t k) { k = sizeof_bits_v<value_type> * k + idx_; ptr_ += k / sizeof_bits_v<storage_type>; idx_ = k % sizeof_bits_v<storage_type>; return *this; } CUTE_HOST_DEVICE constexpr subbyte_iterator operator+(uint64_t k) const { return subbyte_iterator(ptr_, idx_) += k; } CUTE_HOST_DEVICE constexpr reference operator[](uint64_t k) const { return *(*this + k); } CUTE_HOST_DEVICE constexpr subbyte_iterator& operator++() { idx_ += sizeof_bits_v<value_type>; if (idx_ >= sizeof_bits_v<storage_type>) { ++ptr_; idx_ -= sizeof_bits_v<storage_type>; } return *this; } CUTE_HOST_DEVICE constexpr subbyte_iterator operator++(int) { subbyte_iterator ret(*this); ++(*this); return ret; } CUTE_HOST_DEVICE constexpr subbyte_iterator& operator--() { if (idx_ >= sizeof_bits_v<value_type>) { idx_ -= sizeof_bits_v<value_type>; } else { --ptr_; idx_ += sizeof_bits_v<storage_type> - sizeof_bits_v<value_type>; } return *this; } CUTE_HOST_DEVICE constexpr subbyte_iterator operator--(int) { subbyte_iterator ret(*this); --(*this); return ret; } CUTE_HOST_DEVICE constexpr friend bool operator==(subbyte_iterator const& x, subbyte_iterator const& y) { return x.ptr_ == y.ptr_ && x.idx_ == y.idx_; } CUTE_HOST_DEVICE constexpr friend bool operator< (subbyte_iterator const& x, subbyte_iterator const& y) { return x.ptr_ < y.ptr_ || (x.ptr_ == y.ptr_ && x.idx_ < y.idx_); } CUTE_HOST_DEVICE constexpr friend bool operator!=(subbyte_iterator const& x, subbyte_iterator const& y) { return !(x == y); } CUTE_HOST_DEVICE constexpr friend bool operator<=(subbyte_iterator const& x, subbyte_iterator const& 
y) { return !(y < x); } CUTE_HOST_DEVICE constexpr friend bool operator> (subbyte_iterator const& x, subbyte_iterator const& y) { return (y < x); } CUTE_HOST_DEVICE constexpr friend bool operator>=(subbyte_iterator const& x, subbyte_iterator const& y) { return !(x < y); } // Conversion to raw pointer with loss of subbyte index CUTE_HOST_DEVICE constexpr friend T* raw_pointer_cast(subbyte_iterator const& x) { assert(x.idx_ == 0); return reinterpret_cast<T*>(x.ptr_); } // Conversion to NewT_ with possible loss of subbyte index template <class NewT_> CUTE_HOST_DEVICE constexpr friend auto recast_ptr(subbyte_iterator const& x) { using NewT = conditional_t<(is_const_v<T>), NewT_ const, NewT_>; if constexpr (cute::is_subbyte_v<NewT>) { // Making subbyte_iter, preserve the subbyte idx return subbyte_iterator<NewT>(x.ptr_, x.idx_); } else { // Not subbyte, assume/assert subbyte idx 0 return reinterpret_cast<NewT*>(raw_pointer_cast(x)); } CUTE_GCC_UNREACHABLE; } CUTE_HOST_DEVICE friend void print(subbyte_iterator x) { printf("subptr[%db](%p.%u)", int(sizeof_bits_v<T>), x.ptr_, x.idx_); } }; // // array_subbyte // Statically sized array for non-byte-aligned data types // template <class T, size_t N> struct array_subbyte { using element_type = T; using value_type = remove_cv_t<T>; using pointer = element_type*; using const_pointer = element_type const*; using size_type = size_t; using difference_type = ptrdiff_t; // // References // using reference = subbyte_reference<element_type>; using const_reference = subbyte_reference<element_type const>; // // Iterators // using iterator = subbyte_iterator<element_type>; using const_iterator = subbyte_iterator<element_type const>; // Storage type (const or non-const) using storage_type = conditional_t<(is_const_v<T>), subbyte_storage_type_t<T> const, subbyte_storage_type_t<T>>; static_assert(sizeof_bits_v<storage_type> % 8 == 0, "Storage type is not supported"); private: // Number of storage elements, ceil_div static constexpr size_type StorageElements = (N * sizeof_bits_v<value_type> + sizeof_bits_v<storage_type> - 1) / sizeof_bits_v<storage_type>; // Internal storage storage_type storage[StorageElements]; public: constexpr array_subbyte() = default; CUTE_HOST_DEVICE constexpr array_subbyte(array_subbyte const& x) { CUTE_UNROLL for (size_type i = 0; i < StorageElements; ++i) { storage[i] = x.storage[i]; } } CUTE_HOST_DEVICE constexpr size_type size() const { return N; } CUTE_HOST_DEVICE constexpr size_type max_size() const { return N; } CUTE_HOST_DEVICE constexpr bool empty() const { return !N; } // Efficient clear method CUTE_HOST_DEVICE constexpr void clear() { CUTE_UNROLL for (size_type i = 0; i < StorageElements; ++i) { storage[i] = storage_type(0); } } CUTE_HOST_DEVICE constexpr void fill(T const& value) { CUTE_UNROLL for (size_type i = 0; i < N; ++i) { at(i) = value; } } CUTE_HOST_DEVICE constexpr reference at(size_type pos) { return iterator(storage)[pos]; } CUTE_HOST_DEVICE constexpr const_reference at(size_type pos) const { return const_iterator(storage)[pos]; } CUTE_HOST_DEVICE constexpr reference operator[](size_type pos) { return at(pos); } CUTE_HOST_DEVICE constexpr const_reference operator[](size_type pos) const { return at(pos); } CUTE_HOST_DEVICE constexpr reference front() { return at(0); } CUTE_HOST_DEVICE constexpr const_reference front() const { return at(0); } CUTE_HOST_DEVICE constexpr reference back() { return at(N-1); } CUTE_HOST_DEVICE constexpr const_reference back() const { return at(N-1); } CUTE_HOST_DEVICE constexpr pointer data() 
{ return reinterpret_cast<pointer>(storage); } CUTE_HOST_DEVICE constexpr const_pointer data() const { return reinterpret_cast<const_pointer>(storage); } CUTE_HOST_DEVICE constexpr storage_type* raw_data() { return storage; } CUTE_HOST_DEVICE constexpr storage_type const* raw_data() const { return storage; } CUTE_HOST_DEVICE constexpr iterator begin() { return iterator(storage); } CUTE_HOST_DEVICE constexpr const_iterator begin() const { return const_iterator(storage); } CUTE_HOST_DEVICE constexpr const_iterator cbegin() const { return begin(); } CUTE_HOST_DEVICE constexpr iterator end() { return iterator(storage) + N; } CUTE_HOST_DEVICE constexpr const_iterator end() const { return const_iterator(storage) + N; } CUTE_HOST_DEVICE constexpr const_iterator cend() const { return end(); } // // Comparison operators // }; // // Operators // template <class T, size_t N> CUTE_HOST_DEVICE constexpr void clear(array_subbyte<T,N>& a) { a.clear(); } template <class T, size_t N> CUTE_HOST_DEVICE constexpr void fill(array_subbyte<T,N>& a, T const& value) { a.fill(value); } } // namespace cute // // Specialize tuple-related functionality for cute::array_subbyte // #if defined(__CUDACC_RTC__) #include <cuda/std/tuple> #else #include <tuple> #endif namespace cute { template <size_t I, class T, size_t N> CUTE_HOST_DEVICE constexpr T& get(array_subbyte<T,N>& a) { static_assert(I < N, "Index out of range"); return a[I]; } template <size_t I, class T, size_t N> CUTE_HOST_DEVICE constexpr T const& get(array_subbyte<T,N> const& a) { static_assert(I < N, "Index out of range"); return a[I]; } template <size_t I, class T, size_t N> CUTE_HOST_DEVICE constexpr T&& get(array_subbyte<T,N>&& a) { static_assert(I < N, "Index out of range"); return cute::move(a[I]); } } // end namespace cute namespace CUTE_STL_NAMESPACE { template <class T> struct is_reference<cute::subbyte_reference<T>> : CUTE_STL_NAMESPACE::true_type {}; template <class T, size_t N> struct tuple_size<cute::array_subbyte<T,N>> : CUTE_STL_NAMESPACE::integral_constant<size_t, N> {}; template <size_t I, class T, size_t N> struct tuple_element<I, cute::array_subbyte<T,N>> { using type = T; }; template <class T, size_t N> struct tuple_size<const cute::array_subbyte<T,N>> : CUTE_STL_NAMESPACE::integral_constant<size_t, N> {}; template <size_t I, class T, size_t N> struct tuple_element<I, const cute::array_subbyte<T,N>> { using type = T; }; } // end namespace CUTE_STL_NAMESPACE #ifdef CUTE_STL_NAMESPACE_IS_CUDA_STD namespace std { #if defined(__CUDACC_RTC__) template <class... _Tp> struct tuple_size; template <size_t _Ip, class... _Tp> struct tuple_element; #endif template <class T, size_t N> struct tuple_size<cute::array_subbyte<T,N>> : CUTE_STL_NAMESPACE::integral_constant<size_t, N> {}; template <size_t I, class T, size_t N> struct tuple_element<I, cute::array_subbyte<T,N>> { using type = T; }; template <class T, size_t N> struct tuple_size<const cute::array_subbyte<T,N>> : CUTE_STL_NAMESPACE::integral_constant<size_t, N> {}; template <size_t I, class T, size_t N> struct tuple_element<I, const cute::array_subbyte<T,N>> { using type = T; }; } // end namespace std #endif // CUTE_STL_NAMESPACE_IS_CUDA_STD
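// ---------------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header): packing eight 4-bit integers into
// an array_subbyte and accessing them through subbyte_reference proxies. cute::int4_t is assumed
// to be the sub-byte integer alias pulled in via cute/numeric/numeric_types.hpp.
//
//   cute::array_subbyte<cute::int4_t, 8> a;   // 8 x 4 bits packed into 32 bits of storage
//   a.fill(cute::int4_t(3));                  // every element becomes 3
//   a[5] = cute::int4_t(-2);                  // writes go through the subbyte_reference proxy
//   cute::int4_t x = a[5];                    // reads extract the nibble through the proxy
// ---------------------------------------------------------------------------------------------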
cutlass/include/cute/container/array_subbyte.hpp/0
{ "file_path": "cutlass/include/cute/container/array_subbyte.hpp", "repo_id": "cutlass", "token_count": 6841 }
16
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Barrier Operations on SM90+ */ #pragma once #include <cutlass/arch/memory_sm75.h> #include <cute/arch/cluster_sm90.hpp> #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 900 && (__CUDACC_VER_MAJOR__ >= 12) #define CUDA_BARRIER_ENABLED 1 #else #define CUDA_BARRIER_ENABLED 0 #endif namespace cutlass { /// @brief namespace arch { //////////////////////////////////////////////////////////////////////////////////////////////////// // Enumerates the reserved named barriers to avoid potential conflicts // This enum class specifies the NamedBarriers reserved by CUTLASS. 
enum class ReservedNamedBarriers { EpilogueBarrier = 0, TransposeBarrier = 1, TransformBarrier = 2, StreamkBarrier0 = 3, StreamkBarrier1 = 4 , FirstUserBarrier = StreamkBarrier1 + 1 }; class NamedBarrier { // Data Members: // Range = [1 , NUM_THREADS_PER_CTA] // Range % warp-size (i.e 32) == 0 uint32_t const num_threads_; // Range : [0, 15] // Note that should be set to the final barrier ID, including ReserveNamedBarrierCount should be considered uint32_t const id_; public: // Constructor for CUTLASS developers: // effective barrier ID starts from 0 CUTLASS_DEVICE NamedBarrier(uint32_t num_threads, ReservedNamedBarriers reserved_named_barriers) : num_threads_(num_threads), id_(static_cast<uint32_t>(reserved_named_barriers)) {} // Constructor for CUTLASS users: // effective barrier ID starts from ReservedNamedBarrierCount CUTLASS_DEVICE NamedBarrier(uint32_t num_threads, uint32_t id = 0) : num_threads_(num_threads), id_(id + ReservedNamedBarrierCount) { CUTLASS_ASSERT(id + ReservedNamedBarrierCount <= HardwareMaxNumNamedBarriers && "Effective barrier_id should not exceed 16."); } CUTLASS_DEVICE void arrive_and_wait() const { // Note: The value of id_ is already the final barrier id (set correctly in the constructor). NamedBarrier::arrive_and_wait_internal(num_threads_, id_); } CUTLASS_DEVICE void arrive() const { // Note: The value of id_ is already the final barrier id (set correctly in the constructor). NamedBarrier::arrive_internal(num_threads_, id_); } CUTLASS_DEVICE void sync() const { NamedBarrier::arrive_and_wait(); } // Static variants // Calling interface for CUTLASS users: // effective barrier ID starts from ReservedNamedBarrierCount CUTLASS_DEVICE static void arrive_and_wait(uint32_t num_threads, uint32_t barrier_id) { arrive_and_wait_internal(num_threads, barrier_id + ReservedNamedBarrierCount); } // Calling interface for CUTLASS developers: // effective barrier ID starts from 0 CUTLASS_DEVICE static void arrive_and_wait(uint32_t num_threads, ReservedNamedBarriers reserved_named_barriers) { arrive_and_wait_internal(num_threads, static_cast<int>(reserved_named_barriers)); } // Calling interface for CUTLASS users: // effective barrier ID starts from ReservedNamedBarrierCount CUTLASS_DEVICE static void arrive(uint32_t num_threads, uint32_t barrier_id) { arrive_internal(num_threads, barrier_id + ReservedNamedBarrierCount); } // Calling interface for CUTLASS developers: // effective barrier ID starts from 0 CUTLASS_DEVICE static void arrive(uint32_t num_threads, ReservedNamedBarriers reserved_named_barriers) { arrive_internal(num_threads, static_cast<int>(reserved_named_barriers)); } // Calling interface for CUTLASS users: // effective barrier ID starts from ReservedNamedBarrierCount CUTLASS_DEVICE static void sync(uint32_t num_threads, uint32_t barrier_id) { sync_internal(num_threads, barrier_id + ReservedNamedBarrierCount); } // Calling interface for CUTLASS developers: // effective barrier ID starts from 0 CUTLASS_DEVICE static void sync(uint32_t num_threads, ReservedNamedBarriers reserved_named_barriers) { sync_internal(num_threads, static_cast<int>(reserved_named_barriers)); } private: CUTLASS_DEVICE static void arrive_and_wait_internal(uint32_t num_threads, uint32_t barrier_id) { #if CUDA_BARRIER_ENABLED asm volatile("bar.sync %0, %1;" : : "r"(barrier_id), "r"(num_threads)); #elif defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif } CUTLASS_DEVICE static void arrive_internal(uint32_t num_threads, uint32_t barrier_id) { #if CUDA_BARRIER_ENABLED asm volatile("bar.arrive 
%0, %1;" : : "r"(barrier_id), "r"(num_threads)); #elif defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif } CUTLASS_DEVICE static void sync_internal(uint32_t num_threads, uint32_t barrier_id) { NamedBarrier::arrive_and_wait_internal(num_threads, barrier_id); } public: // Currently we reserve 8 NamedBarriers for CUTLASS' own use cases, // while leaving the renaming for general users. static const uint32_t ReservedNamedBarrierCount = static_cast<uint32_t>(ReservedNamedBarriers::FirstUserBarrier); static const uint32_t HardwareMaxNumNamedBarriers = 16; }; //////////////////////////////////////////////////////////////////////////////////////////////////// // Hopper introduces a new cluster-wide barrier which handle with Cluster-wide arrive-wait behaviour. // This is an extension to the Ampere arrive-wait barriers // Note : Ampere arrive-wait Barriers have a larger max-arrive count (2^30) than Hopper arrive-wait Barriers (2^20). struct ClusterBarrier { using ValueType = uint64_t; protected: // Can never be initialized - can only be aliased to smem ValueType barrier_; public: CUTLASS_DEVICE ClusterBarrier() = delete; CUTLASS_DEVICE void init(uint32_t arrive_count) const { ClusterBarrier::init(&this->barrier_, arrive_count); } CUTLASS_DEVICE uint32_t test_wait(uint32_t phase, uint32_t pred=true) const { return ClusterBarrier::test_wait(&this->barrier_, phase, pred); } CUTLASS_DEVICE uint32_t try_wait(uint32_t phase) const { return ClusterBarrier::try_wait(&this->barrier_, phase); } CUTLASS_DEVICE void wait(uint32_t phase) const { ClusterBarrier::wait(&this->barrier_, phase); } // Barrier arrive on local smem CUTLASS_DEVICE void arrive() const { ClusterBarrier::arrive(&this->barrier_); } // Remote SMEM arrive with a perdicate (usually done to pick the thread doing the arrive) CUTLASS_DEVICE void arrive(uint32_t cta_id, uint32_t pred = true ) const { ClusterBarrier::arrive(&this->barrier_, cta_id, pred); } // // Static Versions // CUTLASS_DEVICE static void init(ValueType const* smem_ptr, uint32_t arrive_count) { #if CUDA_BARRIER_ENABLED uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr); asm volatile( "{\n\t" "mbarrier.init.shared::cta.b64 [%1], %0; \n" "}" : : "r"(arrive_count), "r"(smem_addr)); #elif defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif } // Static version of wait - in case we don't want to burn a register CUTLASS_DEVICE static void wait(ValueType const* smem_ptr, uint32_t phase) { #if CUDA_BARRIER_ENABLED uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr); // Arbitrarily large timer value after which try-wait expires and re-tries. 
uint32_t ticks = 0x989680; asm volatile( "{\n\t" ".reg .pred P1; \n\t" "LAB_WAIT: \n\t" "mbarrier.try_wait.parity.shared::cta.b64 P1, [%0], %1, %2; \n\t" "@P1 bra.uni DONE; \n\t" "bra.uni LAB_WAIT; \n\t" "DONE: \n\t" "}" : : "r"(smem_addr), "r"(phase), "r"(ticks)); #elif defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif } CUTLASS_DEVICE static uint32_t test_wait(ValueType const* smem_ptr, uint32_t phase, uint32_t pred) { #if CUDA_BARRIER_ENABLED uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr); uint32_t waitComplete; asm volatile( "{\n\t" ".reg .pred P1; \n\t" ".reg .pred P2; \n\t" "setp.eq.u32 P2, %3, 1;\n\t" "@P2 mbarrier.test_wait.parity.shared::cta.b64 P1, [%1], %2; \n\t" "selp.b32 %0, 1, 0, P1; \n\t" "}" : "=r"(waitComplete) : "r"(smem_addr), "r"(phase), "r"(pred)); return waitComplete; #elif defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif return 0; } CUTLASS_DEVICE static uint32_t try_wait(ValueType const* smem_ptr, uint32_t phase) { #if CUDA_BARRIER_ENABLED uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr); uint32_t waitComplete; asm volatile( "{\n\t" ".reg .pred P1; \n\t" "mbarrier.try_wait.parity.shared::cta.b64 P1, [%1], %2; \n\t" "selp.b32 %0, 1, 0, P1; \n\t" "}" : "=r"(waitComplete) : "r"(smem_addr), "r"(phase)); return waitComplete; #elif defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif return 0; } // Static Predicated version of the above - in case we know the address. CUTLASS_DEVICE static void arrive(ValueType const* smem_ptr, uint32_t cta_id, uint32_t pred) { #if CUDA_BARRIER_ENABLED uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr); asm volatile( "{\n\t" ".reg .pred p;\n\t" ".reg .b32 remAddr32;\n\t" "setp.eq.u32 p, %2, 1;\n\t" "@p mapa.shared::cluster.u32 remAddr32, %0, %1;\n\t" "@p mbarrier.arrive.shared::cluster.b64 _, [remAddr32];\n\t" "}" : : "r"(smem_addr), "r"(cta_id), "r"(pred)); #elif defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif } // Barrier arrive on local smem CUTLASS_DEVICE static void arrive(ValueType const* smem_ptr) { #if CUDA_BARRIER_ENABLED uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr); asm volatile( "{\n\t" "mbarrier.arrive.shared::cta.b64 _, [%0];\n\t" "}" : : "r"(smem_addr)); #elif defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif } CUTLASS_DEVICE static void invalidate(ValueType const* smem_ptr) { #if CUDA_BARRIER_ENABLED uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr); asm volatile( "{\n\t" "mbarrier.inval.shared::cta.b64 [%0]; \n\t" "}" : : "r"(smem_addr)); #elif defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // SM90 also introduces a new type of cluster-barrier which supports sync. 
// not just based on Arrive Count, but also transaction count (in bytes) struct ClusterTransactionBarrier : public ClusterBarrier { CUTLASS_DEVICE ClusterTransactionBarrier() = delete; // Performs an arrive operation + expected transaction bytes increment CUTLASS_DEVICE void arrive_and_expect_tx(uint32_t transaction_bytes) const { ClusterTransactionBarrier::arrive_and_expect_tx(&this->barrier_, transaction_bytes); } // Performs an arrive operation + expected transaction bytes increment CUTLASS_DEVICE void arrive_and_expect_tx(uint32_t transaction_bytes, uint32_t cta_id, uint32_t pred = 1u) const { ClusterTransactionBarrier::arrive_and_expect_tx(&this->barrier_, transaction_bytes , cta_id, pred); } // Performs an expected transaction bytes increment without doing an arrive operation CUTLASS_DEVICE void expect_transaction(uint32_t transaction_bytes) const { ClusterTransactionBarrier::expect_transaction(&this->barrier_, transaction_bytes); } // Performs an expected transaction bytes decrement without doing an arrive operation CUTLASS_DEVICE void complete_transaction(uint32_t transaction_bytes, uint32_t pred = 1) const { uint32_t cta_rank = cute::block_rank_in_cluster(); ClusterTransactionBarrier::complete_transaction(&this->barrier_, cta_rank, transaction_bytes, pred); } // Performs an expected transaction bytes decrement without doing an arrive operation CUTLASS_DEVICE void complete_transaction(uint32_t dst_cta_id, uint32_t transaction_bytes, uint32_t pred) const { ClusterTransactionBarrier::complete_transaction(&this->barrier_, dst_cta_id, transaction_bytes, pred); } // // Static Versions // // Performs an arrive operation + expected transaction bytes increment CUTLASS_DEVICE static void arrive_and_expect_tx(ValueType const* smem_ptr, uint32_t transaction_bytes) { #if CUDA_BARRIER_ENABLED uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr); asm volatile( "{\n\t" "mbarrier.arrive.expect_tx.shared::cta.b64 _, [%1], %0; \n\t" "}" : : "r"(transaction_bytes), "r"(smem_addr)); #elif defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif } // Performs an arrive operation + expected transaction bytes increment for a remote cta_id in a Cluster CUTLASS_DEVICE static void arrive_and_expect_tx( ValueType const* smem_ptr, uint32_t transaction_bytes, uint32_t cta_id, uint32_t pred) { #if CUDA_BARRIER_ENABLED uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr); asm volatile( "{\n\t" ".reg .pred p;\n\t" ".reg .b32 remAddr32;\n\t" "setp.eq.u32 p, %2, 1;\n\t" "@p mapa.shared::cluster.u32 remAddr32, %0, %1;\n\t" "@p mbarrier.arrive.expect_tx.shared::cluster.b64 _, [remAddr32], %3;\n\t" "}" : : "r"(smem_addr), "r"(cta_id), "r"(pred), "r"(transaction_bytes)); #elif defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif } // Performs an expected transaction bytes increment without doing an arrive operation CUTLASS_DEVICE static void expect_transaction(ValueType const* smem_ptr, uint32_t transaction_bytes) { #if CUDA_BARRIER_ENABLED uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr); asm volatile( "{\n\t" "mbarrier.expect_tx.shared::cta.b64 [%1], %0; \n\t" "}" : : "r"(transaction_bytes), "r"(smem_addr)); #elif defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif } // Performs an expected transaction bytes decrement without doing an arrive operation CUTLASS_DEVICE static void complete_transaction( ValueType const* smem_ptr, uint32_t dst_cta_id, uint32_t transaction_bytes, uint32_t pred = 1) { #if CUDA_BARRIER_ENABLED uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr); 
smem_addr = cute::set_block_rank(smem_addr, dst_cta_id); asm volatile( "{\n\t" ".reg .pred p;\n\t" "setp.eq.u32 p, %2, 1;\n\t" "@p mbarrier.complete_tx.shared::cluster.relaxed.cluster.b64 [%1], %0;" "}" : : "r"(transaction_bytes), "r"(smem_addr), "r"(pred)); #elif defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif } // // DEPRECATED APIs // [[deprecated("Use arrive_and_expect_tx instead")]] CUTLASS_DEVICE void arrive_and_reset_bytes(uint32_t transaction_bytes) const { arrive_and_expect_tx(transaction_bytes); } [[deprecated("Use arrive_and_expect_tx instead")]] CUTLASS_DEVICE void arrive_and_reset_bytes(uint32_t transaction_bytes, uint32_t cta_id) const { arrive_and_expect_tx(transaction_bytes, cta_id); } [[deprecated("Use expect_transaction instead")]] CUTLASS_DEVICE void reset_bytes(uint32_t transaction_bytes) const { expect_transaction(transaction_bytes); } [[deprecated("Use complete_transaction instead")]] CUTLASS_DEVICE void commit(uint32_t transaction_bytes, uint32_t pred = 1) const { complete_transaction(transaction_bytes, pred); } [[deprecated("Use complete_transaction instead")]] CUTLASS_DEVICE void commit(uint32_t dst_cta_id, uint32_t transaction_bytes, uint32_t pred) const { complete_transaction(dst_cta_id, transaction_bytes, pred); } [[deprecated("Use arrive_and_expect_tx instead")]] CUTLASS_DEVICE static void arrive_and_reset_bytes(ValueType const* smem_ptr, uint32_t transaction_bytes) { arrive_and_expect_tx(smem_ptr, transaction_bytes); } [[deprecated("Use arrive_and_expect_tx instead")]] CUTLASS_DEVICE static void arrive_and_reset_bytes(ValueType const* smem_ptr, uint32_t transaction_bytes, uint32_t cta_id, uint32_t pred) { arrive_and_expect_tx(smem_ptr, transaction_bytes, cta_id, pred); } [[deprecated("Use expect_transaction instead")]] CUTLASS_DEVICE static void reset_bytes(ValueType const* smem_ptr, uint32_t transaction_bytes) { expect_transaction(smem_ptr, transaction_bytes); } [[deprecated("Use complete_transaction instead")]] CUTLASS_DEVICE static void commit(ValueType const* smem_ptr, uint32_t dst_cta_id, uint32_t transaction_bytes, uint32_t pred = 1) { complete_transaction(smem_ptr, dst_cta_id, transaction_bytes, pred); } }; // Helps with visibility of barrier init operations across warps / cta / cluster // Available as a separate function so as to batch inits across barriers and fence once // Note : It must be composed with an appropriate sync instruction with the right scope // to ensure visibility eg. 
__syncthreads() or a cluster_arrive() + cluster_wait() CUTLASS_DEVICE void fence_barrier_init() { #if CUDA_BARRIER_ENABLED asm volatile( "{\n\t" "fence.mbarrier_init.release.cluster; \n" "}" ::); #elif defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif } // Issue a shared memory fence for async operations CUTLASS_DEVICE void fence_view_async_shared() { #if CUDA_BARRIER_ENABLED asm volatile ( "{\n\t" "fence.proxy.async.shared::cta; \n" "}" ::); #elif defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif } // Arrive on completion of in-flight cp.async operations issued by the calling thread CUTLASS_DEVICE void cpasync_barrier_arrive(uint64_t const* smem_ptr) { #if CUDA_BARRIER_ENABLED uint32_t smem_addr = cute::cast_smem_ptr_to_uint(smem_ptr); asm volatile( "{\n\t" "cp.async.mbarrier.arrive.shared::cta.b64 [%0];\n\t" "}" : : "r"(smem_addr)); #elif defined(__CUDA_ARCH__) asm volatile ("brkpt;\n" ::); #endif } //////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////// } // end namespace arch } // end namespace cutlass
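////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Illustrative usage sketch (editor addition, not part of the original header): a user-facing
// NamedBarrier synchronizing the two halves of a 256-thread CTA. User barrier id 0 is offset by
// ReservedNamedBarrierCount internally, so it never collides with the reserved barriers above.
// The kernel below is hypothetical and assumes blockDim.x == 256 and an output buffer of 128 ints.
//
#if defined(__CUDACC__)
__global__ void named_barrier_example_kernel(int* out) {
  __shared__ int staging[128];
  cutlass::arch::NamedBarrier bar(/*num_threads=*/256, /*id=*/0);
  if (threadIdx.x < 128) {
    staging[threadIdx.x] = int(threadIdx.x) * 2;          // producer half writes to smem
  }
  bar.arrive_and_wait();                                  // all 256 threads rendezvous here
  if (threadIdx.x >= 128) {
    out[threadIdx.x - 128] = staging[threadIdx.x - 128];  // consumer half reads the staged data
  }
}
#endif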
cutlass/include/cutlass/arch/barrier.h/0
{ "file_path": "cutlass/include/cutlass/arch/barrier.h", "repo_id": "cutlass", "token_count": 7747 }
17
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief PTX for TMA Tensor Memory Access operators on memory added for SM90 */ #pragma once #include <cuda_runtime_api.h> #include "cutlass/cutlass.h" #include "cutlass/trace.h" #if defined(__CUDACC_RTC__) #include <cuda/std/type_traits> #else #include <type_traits> #include <cstdio> #endif #if ((__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8))) # define CUTLASS_SM90_CLUSTER_LAUNCH_ENABLED #endif namespace cutlass { #ifndef NDEBUG #define Return_Status(cudaError_t_status) \ if (cudaError_t_status != cudaSuccess) { \ fprintf(stderr, \ "[ ERROR: CUDA Runtime ] %s:%d: %s\n", \ __FILE__, \ __LINE__, \ cudaGetErrorString(cudaError_t_status)); \ return Status::kInvalid; \ } else { \ return Status::kSuccess; \ } #else #define Return_Status(cudaError_t_status) \ if (cudaError_t_status != cudaSuccess) { \ return Status::kInvalid; \ } else { \ return Status::kSuccess; \ } #endif struct ClusterLauncher { constexpr static int MaxClusterSize = 32; // Check for hardware compatibility static inline CUTLASS_HOST Status check_cluster_dims(dim3 grid, dim3 cluster) { if (((cluster.x * cluster.y * cluster.z) <= MaxClusterSize) && (grid.x % cluster.x == 0) && (grid.y % cluster.y == 0) && (grid.z % cluster.z == 0)) { return Status::kSuccess; } else { CUTLASS_TRACE_HOST("ClusterLauncher: Invalid cluster configuration -- aborting launch."); return Status::kInvalid; } } static inline CUTLASS_HOST Status #if defined(CUTLASS_SM90_CLUSTER_LAUNCH_ENABLED) init(void const* kernel_function) #else init(void const* /* kernel_function */) #endif { #if defined(CUTLASS_SM90_CLUSTER_LAUNCH_ENABLED) #if defined(CUTLASS_DEBUG_TRACE_LEVEL) && (CUTLASS_DEBUG_TRACE_LEVEL > 1) if (kernel_function == nullptr) { CUTLASS_TRACE_HOST("kernel_function is null"); return Status::kInvalid; } CUTLASS_TRACE_HOST("Checking previous error state before calling cudaFuncSetAttribute"); cudaError_t prevStatus = cudaGetLastError(); if (prevStatus != cudaSuccess) { fprintf(stderr, "[ ERROR: CUDA Runtime ] %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(prevStatus)); return Status::kInvalid; } CUTLASS_TRACE_HOST("Calling cudaFuncSetAttribute"); #endif // This attribute was added in CUDA 11.8. cudaError_t status = cudaFuncSetAttribute( kernel_function, cudaFuncAttributeNonPortableClusterSizeAllowed, 1); Return_Status(status); #else return Status::kInvalid; #endif } // This is the method we expect to use going forward static inline CUTLASS_HOST Status launch( dim3 const grid_dims, dim3 const cluster_dims, dim3 const block_dims, size_t const smem_size, cudaStream_t cuda_stream, void const* kernel, void** kernel_params) { #if defined(CUTLASS_SM90_CLUSTER_LAUNCH_ENABLED) if (check_cluster_dims(grid_dims, cluster_dims) != Status::kSuccess) { CUTLASS_TRACE_HOST("ClusterLauncher: check_cluster_dims() failed. Aborting."); return Status::kInvalid; } auto init_status = init(kernel); if (init_status != Status::kSuccess) { CUTLASS_TRACE_HOST("ClusterLauncher: init(kernel) failed with status " << int(init_status) << ". 
Aborting."); return Status::kInvalid; } cudaLaunchConfig_t launch_config; launch_config.gridDim = {grid_dims.x, grid_dims.y, grid_dims.z}; launch_config.blockDim = {block_dims.x, block_dims.y, block_dims.z}; launch_config.dynamicSmemBytes = smem_size; launch_config.stream = cuda_stream; cudaLaunchAttribute launch_attribute[1]; launch_attribute[0].id = cudaLaunchAttributeClusterDimension; launch_attribute[0].val.clusterDim.x = cluster_dims.x; launch_attribute[0].val.clusterDim.y = cluster_dims.y; launch_attribute[0].val.clusterDim.z = cluster_dims.z; launch_config.attrs = launch_attribute; launch_config.numAttrs = 1; CUTLASS_TRACE_HOST("ClusterLauncher: Launching GPC_CLUSTER_GRID GridDims = " "(" << grid_dims.x << ", " << grid_dims.y << ", " << grid_dims.z << "), " "And ClusterDims = " "(" << cluster_dims.x << ", " << cluster_dims.y << ", " << cluster_dims.z << ")\n"); cudaError_t status = cudaLaunchKernelExC(&launch_config, kernel, kernel_params); Return_Status(status); #else CUTLASS_TRACE_HOST("ClusterLauncher: CUTLASS_SM90_CLUSTER_LAUNCH_ENABLED not defined! Aborting cluster launch."); return Status::kInvalid; #endif } }; namespace detail { template<class Arg> void* checked_addressof(Arg&& arg) { static_assert(! std::is_rvalue_reference_v<Arg> || ! std::is_const_v<Arg>, "You cannot take the address of a const rvalue reference (const T&&)."); // We use std::addressof to ensure we get the address, // in case the type has an overloaded operator&. // Note that this precludes `const T&&` references. return const_cast<void*>(reinterpret_cast<void const*>(std::addressof(arg))); } } // namespace detail //! Parameters for launch_on_cluster (see below). struct ClusterLaunchParams { //! Grid dimensions dim3 grid_dims{1, 1, 1}; //! Block dimensions dim3 block_dims{1, 1, 1}; //! Cluster dimensions dim3 cluster_dims{1, 1, 1}; //! Number of bytes required for the kernel's shared memory. int smem_size_in_bytes = 0; //! CUDA stream on which to launch the kernel. cudaStream_t cuda_stream = nullptr; }; /// @brief Launch the kernel on the stream using cluster launch. /// /// @param params Cluster launch parameters (see above). /// @param kernel_ptr Pointer to the kernel function (see example). /// @param args Zero or more arguments to pass to the kernel. /// /// @tparam Args Types of the arguments passed to the kernel. /// Don't specify this/these template argument(s) explicitly. /// /// @return Status::Success on success, else an error code. /// /// @code /// template<class SharedMemoryType, class A, class B, class C> /// __global__ void kernel(A a, B b, C c); /// /// X x = get_x(); /// Y y = get_y(); /// Z z = get_z(); /// /// void const* kernel_ptr = /// const_cast<void const*>(reinterpret_cast<void*>( /// &kernel<SharedMemory, X, Y, Z>)); /// auto status = launch_kernel_on_cluster( /// {grid_dims, block_dims, cluster_dims, sizeof(SharedMemory)}, /// kernel_ptr, x, y, z); /// @endcode template<class ... Args> CUTLASS_HOST cutlass::Status launch_kernel_on_cluster(const ClusterLaunchParams& params, void const* kernel_ptr, Args&& ... args) { // Unfortunately, we find ourselves needing to pass in // the parameters as an array of raw pointers. if constexpr (sizeof...(Args) == 0) { return cutlass::ClusterLauncher::launch( params.grid_dims, params.cluster_dims, params.block_dims, params.smem_size_in_bytes, params.cuda_stream, kernel_ptr, nullptr); } else { void* kernel_params[sizeof...(Args)] = { detail::checked_addressof(std::forward<Args>(args))... 
}; return cutlass::ClusterLauncher::launch( params.grid_dims, params.cluster_dims, params.block_dims, params.smem_size_in_bytes, params.cuda_stream, kernel_ptr, kernel_params); } } } // namespace cutlass
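////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Illustrative usage sketch (editor addition, not part of the original header): launching a trivial
// kernel on a 2x1x1 cluster through launch_kernel_on_cluster. The kernel, helper name, and
// dimensions below are hypothetical; grid_dims must be divisible by cluster_dims or
// ClusterLauncher::check_cluster_dims() rejects the launch.
//
#if defined(__CUDACC__)
__global__ void cluster_example_kernel(int* out) {
  if (threadIdx.x == 0) {
    out[blockIdx.x] = int(blockIdx.x);
  }
}

inline cutlass::Status run_cluster_example(int* device_out, cudaStream_t stream) {
  cutlass::ClusterLaunchParams params;
  params.grid_dims          = dim3(4, 1, 1);    // 4 CTAs total
  params.block_dims         = dim3(128, 1, 1);
  params.cluster_dims       = dim3(2, 1, 1);    // grouped into clusters of 2 CTAs
  params.smem_size_in_bytes = 0;
  params.cuda_stream        = stream;
  void const* kernel_ptr =
      const_cast<void const*>(reinterpret_cast<void*>(&cluster_example_kernel));
  return cutlass::launch_kernel_on_cluster(params, kernel_ptr, device_out);
}
#endif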
cutlass/include/cutlass/cluster_launch.hpp/0
{ "file_path": "cutlass/include/cutlass/cluster_launch.hpp", "repo_id": "cutlass", "token_count": 3699 }
18
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Template for device-level fused activation's scale+bias+relu and Implicit GEMM Convolution */ #pragma once #include <limits> #include "cutlass/cutlass.h" #include "cutlass/device_kernel.h" #include "cutlass/conv/convolution.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template<typename ImplicitGemmFusionKernel_> class ImplicitGemmConvolutionFusion { public: using ImplicitGemmFusionKernel = ImplicitGemmFusionKernel_; using ElementA = typename ImplicitGemmFusionKernel::ElementA; using LayoutA = typename ImplicitGemmFusionKernel::LayoutA; using ElementB = typename ImplicitGemmFusionKernel::ElementB; using LayoutB = typename ImplicitGemmFusionKernel::LayoutB; // using ElementScaleBias = typename ImplicitGemmFusionKernel::ElementScaleBias; // using LayoutScaleBias = typename ImplicitGemmFusionKernel::LayoutScaleBias; using ElementC = typename ImplicitGemmFusionKernel::ElementC; using LayoutC = typename ImplicitGemmFusionKernel::LayoutC; using ElementAccumulator = typename ImplicitGemmFusionKernel::ElementAccumulator; using ElementCompute = typename ImplicitGemmFusionKernel::ElementCompute; using OperatorClass = typename ImplicitGemmFusionKernel::OperatorClass; using ArchTag = typename ImplicitGemmFusionKernel::ArchTag; using ThreadblockShape = typename ImplicitGemmFusionKernel::ThreadblockShape; using WarpShape = typename ImplicitGemmFusionKernel::WarpShape; using InstructionShape = typename ImplicitGemmFusionKernel::InstructionShape; using ThreadblockSwizzle = typename 
ImplicitGemmFusionKernel::ThreadblockSwizzle; using EpilogueOutputOp = typename ImplicitGemmFusionKernel::EpilogueOutputOp; static int const kStages = ImplicitGemmFusionKernel::kStages; static int const kConvDim = ImplicitGemmFusionKernel::kConvDim; using WarpMmaOperator = typename ImplicitGemmFusionKernel::WarpMmaOperator; using ArchMmaOperator = typename ImplicitGemmFusionKernel::ArchMmaOperator; using MathOperator = typename ImplicitGemmFusionKernel::MathOperator; static cutlass::conv::Operator const kConvolutionalOperator = ImplicitGemmFusionKernel::kConvolutionalOperator; static cutlass::conv::IteratorAlgorithm const kIteratorAlgorithm = ImplicitGemmFusionKernel::kIteratorAlgorithm; static int const kWarpCount = (ThreadblockShape::kM / WarpShape::kM) * (ThreadblockShape::kN / WarpShape::kN) * (ThreadblockShape::kK / WarpShape::kK); /// Argument structure using Arguments = typename ImplicitGemmFusionKernel::Arguments; private: /// Kernel parameters object typename ImplicitGemmFusionKernel::Params params_; public: /// Constructs Implicit GEMM ImplicitGemmConvolutionFusion() { } /// Determines whether the Implicit GEMM can execute the given problem. static Status can_implement(Arguments const &args) { // dispatch to iterators Status status = ImplicitGemmFusionKernel::Mma::IteratorA::can_implement(args.problem_size); if (Status::kSuccess != status) { return status; } status = ImplicitGemmFusionKernel::Mma::IteratorB::can_implement(args.problem_size); if (Status::kSuccess != status) { return status; } // Determine grid shape ThreadblockSwizzle threadblock_swizzle; dim3 grid = threadblock_swizzle.get_grid_shape( threadblock_swizzle.get_tiled_shape( cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size), {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.problem_size.split_k_slices)); if (!(grid.y <= std::numeric_limits<uint16_t>::max() && grid.z <= std::numeric_limits<uint16_t>::max())) { return Status::kErrorInvalidProblem; } return Status::kSuccess; } /// Gets the workspace size static size_t get_workspace_size(Arguments const &args) { size_t workspace_bytes = 0; // Determine grid shape ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord grid_tiled_shape = threadblock_swizzle.get_tiled_shape( cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, args.problem_size), {ThreadblockShape::kM, ThreadblockShape::kN, ThreadblockShape::kK}, args.problem_size.split_k_slices); if(args.split_k_mode == SplitKMode::kParallel) { // Split-K parallel: CTAs in k-dimension write the partial results in a temporary workspace. // The user needs to call a reduction operator to obtain the final output tensor workspace_bytes = sizeof(ElementAccumulator) * size_t(cutlass::conv::implicit_gemm_tensor_c_size(kConvolutionalOperator, args.problem_size)) * size_t(grid_tiled_shape.k()); } else if(args.split_k_mode == SplitKMode::kSerial && args.problem_size.split_k_slices > 1) { // Split-K serial: The user workspace is used to store a semaphore and serialize writing the // final reduced output to the user's output tensor workspace_bytes = sizeof(int) * size_t(grid_tiled_shape.m()) * size_t(grid_tiled_shape.n()); } return workspace_bytes; } /// Initializes GEMM state from arguments.
Status initialize( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { if (args.problem_size.split_k_slices > 1) { if (!workspace) { return Status::kErrorWorkspaceNull; } cudaError_t status = cudaMemsetAsync(workspace, 0, get_workspace_size(args), stream); if (status != cudaSuccess) { return Status::kErrorInternal; } } // initialize the params structure from the arguments params_ = typename ImplicitGemmFusionKernel::Params( args, static_cast<int *>(workspace) ); int smem_size = int(sizeof(typename ImplicitGemmFusionKernel::SharedStorage)); if (smem_size >= (48 << 10)) { cudaError_t result = cudaFuncSetAttribute(cutlass::Kernel<ImplicitGemmFusionKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size); if (result != cudaSuccess) { return Status::kErrorInternal; } } return Status::kSuccess; } /// Updates Implicit GEMM state from arguments. Status update(Arguments const &args, void *workspace = nullptr) { // update the params structure from the arguments params_.ptr_A = args.ref_A.data(); params_.ptr_B = args.ref_B.data(); params_.ptr_scale = args.ref_A_scale.data(); params_.ptr_bias = args.ref_A_bias.data(); params_.ptr_C = args.ref_C.data(); params_.ptr_D = args.ref_D.data(); params_.output_op = args.output_op; params_.semaphore = static_cast<int *>(workspace); return Status::kSuccess; } /// Runs the kernel using initialized state. Status run(cudaStream_t stream = nullptr) { ThreadblockSwizzle threadblock_swizzle; dim3 grid = threadblock_swizzle.get_grid_shape(params_.grid_tiled_shape); dim3 block(32 * kWarpCount, 1, 1); int smem_size = int(sizeof(typename ImplicitGemmFusionKernel::SharedStorage)); cutlass::Kernel<ImplicitGemmFusionKernel><<<grid, block, smem_size, stream>>>(params_); cudaError_t result = cudaGetLastError(); return result == cudaSuccess ? Status::kSuccess : Status::kErrorInternal; } /// Runs the kernel using initialized state. Status operator()(cudaStream_t stream = nullptr) { return run(stream); } /// Initializes state from arguments and runs the kernel. Status operator()( Arguments const &args, void *workspace = nullptr, cudaStream_t stream = nullptr) { Status status = initialize(args, workspace, stream); if (status == Status::kSuccess) { status = run(stream); } return status; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } } } /////////////////////////////////////////////////////////////////////////////////////////////////
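/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Illustrative usage sketch (editor addition, not part of the original header): the typical
// host-side call sequence for this device-level wrapper. "FusionKernel" stands for a concrete
// kernel type produced by the conv kernel-level templates; the helper name below is hypothetical.
//
template <typename FusionKernel>
cutlass::Status run_implicit_gemm_fusion(
    typename cutlass::conv::device::ImplicitGemmConvolutionFusion<FusionKernel>::Arguments const& args,
    void* workspace,
    cudaStream_t stream = nullptr) {
  using Conv = cutlass::conv::device::ImplicitGemmConvolutionFusion<FusionKernel>;
  Conv conv_op;
  // 1. Check that the iterators and swizzle can handle this problem size.
  cutlass::Status status = Conv::can_implement(args);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }
  // 2. The caller must have allocated at least Conv::get_workspace_size(args) bytes of workspace.
  status = conv_op.initialize(args, workspace, stream);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }
  // 3. Launch the kernel on the given stream.
  return conv_op.run(stream);
}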
cutlass/include/cutlass/conv/device/implicit_gemm_convolution_fusion.h/0
{ "file_path": "cutlass/include/cutlass/conv/device/implicit_gemm_convolution_fusion.h", "repo_id": "cutlass", "token_count": 3362 }
19
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing loading of convolution tiles mapped to GEMM B (filter tile) matrix from memory. This iterator assumes TensorNHWC layout of tensors in Global Memory. The iterator is specialized for each of the three convolution operators: forward propagation (Fprop), backward data gradient (Dgrad), and backward weight gradient (Wgrad). 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/predicate_vector.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/matrix.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv2d_problem_size.h" #include "cutlass/conv/threadblock/conv2d_params.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Shape_, typename Element_, typename ThreadMap_, conv::StrideSupport StrideSupport_ = conv::StrideSupport::kUnity, typename AccessType_ = cutlass::AlignedArray<Element_, ThreadMap_::kElementsPerAccess> > class Conv2dDgradFilterTileAccessIteratorOptimized; ///////////////////////////////////////////////////////////////////////////////////////////////// // Conv2dDgradFilterTileAccessIteratorOptimized unity strided dgrad is more performant for dgrad // on problem sizes with stride = {1x1} template < typename Shape_, typename Element_, typename ThreadMap_, typename AccessType_ > class Conv2dDgradFilterTileAccessIteratorOptimized < Shape_, Element_, ThreadMap_, conv::StrideSupport::kStrided, AccessType_ > { public: // // Types // using Shape = Shape_; using Element = Element_; using Layout = layout::TensorNHWC; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using TensorRef = cutlass::TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized; static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided; static int const kConvDim = 2; using ConvProblemSize = typename conv::Conv2dProblemSize; static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements; static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements), "Vectors implied by the thread map must be divisible by the access type."); // // Parameters structure // struct Params : Conv2dStridedDgradFilterIteratorOptimizedParams { // // Methods // CUTLASS_HOST_DEVICE Params() { } CUTLASS_HOST_DEVICE Params(Conv2dStridedDgradFilterIteratorOptimizedParams const &base): Conv2dStridedDgradFilterIteratorOptimizedParams(base) { } CUTLASS_HOST_DEVICE Params( Conv2dProblemSize const &problem_size, Layout const &layout ): Conv2dStridedDgradFilterIteratorOptimizedParams( problem_size, layout, sizeof_bits<Element>::value, {Shape::kRow, Shape::kColumn}, ThreadMap::kThreads, ThreadMap::kElementsPerAccess, {ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided}, {ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided} ) { } }; private: Conv2dStridedDgradFilterIteratorOptimizedParams const &params_; Conv2dProblemSize const &problem_size_; LongIndex iteration_contiguous_; LongIndex iteration_strided_; LongIndex iteration_vector_; char const *pointer_; uint32_t predicates_[kAccessesPerVector]; int filter_k_; int filter_r_; int filter_s_; int start_r_; int start_s_; int64_t reset_bytes_s_; int64_t reset_bytes_r_; // // Assertions // // We map predicates into bits packed in this uint32_t container static_assert(ThreadMap::Iterations::kStrided * ThreadMap::Iterations::kContiguous < 
sizeof(predicates_) * 8, "Currently, the number of loads per iteration is limited by the size of the predicates container."); public: CUTLASS_HOST_DEVICE Conv2dDgradFilterTileAccessIteratorOptimized( Conv2dStridedDgradFilterIteratorOptimizedParams const &params, Conv2dProblemSize const &problem_size, Element const *ptr, int thread_idx, int start_r, int start_s, MatrixCoord const &threadblock_offset = MatrixCoord() ): params_(params), problem_size_(problem_size), pointer_(reinterpret_cast<char const *>(ptr)), predicates_{0}, filter_r_(start_r), filter_s_(start_s), start_r_(start_r), start_s_(start_s) { layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx); filter_k_ = threadblock_offset.row() + thread_coord.strided(); Index column = threadblock_offset.column() + thread_coord.contiguous(); reset_bytes_s_ = (problem_size_.num_gemm_k_filter_s(start_s_) - 1) * params_.inc_next[0]; reset_bytes_r_ = reset_bytes_s_ + (problem_size_.num_gemm_k_filter_r(start_r_) - 1) * params_.inc_next[1]; CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int filter_k = filter_k_ + s * ThreadMap::Delta::kStrided; int filter_c = column + c * ThreadMap::Delta::kContiguous; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < kAccessesPerVector; ++v) { uint32_t pred = ((filter_k < problem_size_.K && (filter_c + v * AccessType::kElements) < problem_size_.C) ? 1u : 0); int pred_idx = c + s * ThreadMap::Iterations::kContiguous; predicates_[v] |= (pred << pred_idx); } } } TensorCoord coord{filter_k_, filter_r_, filter_s_, column}; pointer_ += params_.layout(coord) * sizeof_bits<Element>::value / 8; set_iteration_index(0); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(Index index) { iteration_vector_ = index % kAccessesPerVector; int residual_access = index / kAccessesPerVector; iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous; iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } CUTLASS_DEVICE void advance() { int next_idx = 0; LongIndex reset_bytes = params_.reset_bytes; // Move filter_s by stride_w filter_s_ += problem_size_.stride_w; if (filter_s_ >= problem_size_.S) { // Restore filter_s filter_s_ = start_s_; // Move filter_r by stride_h filter_r_ += problem_size_.stride_h; #if 0 bool check = (filter_r_ < problem_size_.R); filter_r_ = check ? filter_r_ : start_r_; next_idx = check ? 1 : 2; reset_bytes += (check ? 
reset_bytes_s_ : reset_bytes_r_); #else asm volatile( "{\n\t" " .reg .pred %%p;\n\t" " .reg .s64 t1;\n\t" " setp.lt.s32 %%p, %3, %4;\n\t" " selp.s32 %0, %3, %5, %%p;\n\t" " selp.s32 %1, 1, 2, %%p;\n\t" " selp.s64 t1, %6, %7, %%p;\n\t" " add.s64 %2, %8, t1;\n\t" "}\n" : "=r"(filter_r_), "=r"(next_idx), "=l"(reset_bytes) : "r"(filter_r_), "r"(problem_size_.R), "r"(start_r_), "l"(reset_bytes_s_), "l"(reset_bytes_r_), "l"(reset_bytes)); #endif } // offset pointers by offset_bytes pointer_ += (params_.inc_next[next_idx] - reset_bytes); if (next_idx == 2) { filter_k_ += params_.filter_k_delta; } // Clear predicates if needed CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { if (filter_k_ + s * ThreadMap::Delta::kStrided >= problem_size_.K) { uint32_t kClearMask = ((1u << ThreadMap::Iterations::kContiguous) - 1) << (s * ThreadMap::Iterations::kContiguous); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < kAccessesPerVector; ++v) { predicates_[v] = (predicates_[v] & (~kClearMask)); } } } } /// Returns true if the current coordinate is within the filter tensor W CUTLASS_HOST_DEVICE bool valid() { LongIndex pred_idx = iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous; return (predicates_[iteration_vector_] & (1u << pred_idx)); } /// Returns a pointer to the vector starting at the current coordinate CUTLASS_HOST_DEVICE AccessType const *get() const { return reinterpret_cast<AccessType const *>(pointer_ + iteration_contiguous_ * ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value / 8) + iteration_vector_; } /// Increments to the next memory access CUTLASS_HOST_DEVICE Conv2dDgradFilterTileAccessIteratorOptimized &operator++() { ++iteration_vector_; if (iteration_vector_ < kAccessesPerVector) { return *this; } iteration_vector_ = 0; ++iteration_contiguous_; if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { return *this; } iteration_contiguous_ = 0; ++iteration_strided_; if (iteration_strided_ < ThreadMap::Iterations::kStrided) { // Move to the next K coordinate within the tile pointer_ += params_.inc_next_strided; return *this; } iteration_strided_ = 0; return *this; } /// Determines whether the Implicit GEMM can execute the given problem. 
CUTLASS_HOST_DEVICE static Status can_implement(Conv2dProblemSize const &problem_size) { // check alignment constraint on iterator's contiguous dimension if (problem_size.C % AccessType::kElements) { return Status::kErrorInvalidProblem; } return Status::kSuccess; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Conv2dDgradFilterTileAccessIteratorOptimized unity strided dgrad is more performant for dgrad // on problem sizes with stride = {1x1} template < typename Shape_, typename Element_, typename ThreadMap_, typename AccessType_ > class Conv2dDgradFilterTileAccessIteratorOptimized < Shape_, Element_, ThreadMap_, conv::StrideSupport::kUnity, AccessType_ > { public: // // Types // using Shape = Shape_; using Element = Element_; using Layout = layout::TensorNHWC; using ThreadMap = ThreadMap_; using AccessType = AccessType_; using TensorRef = cutlass::TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kOptimized; static StrideSupport const kStrideSupport = conv::StrideSupport::kUnity; static int const kConvDim = 2; using ConvProblemSize = typename conv::Conv2dProblemSize; static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements; static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements), "Vectors implied by the thread map must be divisible by the access type."); // // Parameters structure // struct Params : Conv2dDgradFilterIteratorOptimizedParams { // // Methods // CUTLASS_HOST_DEVICE Params() { } CUTLASS_HOST_DEVICE Params(Conv2dDgradFilterIteratorOptimizedParams const &base): Conv2dDgradFilterIteratorOptimizedParams(base) { } CUTLASS_HOST_DEVICE Params( Conv2dProblemSize const &problem_size, Layout const &layout ): Conv2dDgradFilterIteratorOptimizedParams( problem_size, layout, sizeof_bits<Element>::value, {Shape::kRow, Shape::kColumn}, ThreadMap::kThreads, ThreadMap::kElementsPerAccess, {ThreadMap::Iterations::kContiguous, ThreadMap::Iterations::kStrided}, {ThreadMap::Delta::kContiguous, ThreadMap::Delta::kStrided} ) { } }; private: Conv2dDgradFilterIteratorOptimizedParams const &params_; Conv2dProblemSize const &problem_size_; LongIndex iteration_contiguous_; LongIndex iteration_strided_; LongIndex iteration_vector_; char const *pointer_; uint32_t predicates_[kAccessesPerVector]; int filter_rs_; int filter_k_; // // Assertions // // We map predicates into bits packed in this uint32_t container static_assert(ThreadMap::Iterations::kStrided * ThreadMap::Iterations::kContiguous < sizeof(predicates_) * 8, "Currently, the number of loads per iteration is limited by the size of the predicates container."); public: CUTLASS_HOST_DEVICE Conv2dDgradFilterTileAccessIteratorOptimized( Conv2dDgradFilterIteratorOptimizedParams const &params, Conv2dProblemSize const &problem_size, Element const *ptr, int thread_idx, MatrixCoord const &threadblock_offset = MatrixCoord() ): params_(params), problem_size_(problem_size), pointer_(reinterpret_cast<char const *>(ptr)), predicates_{0}, filter_rs_(0), filter_k_(0) { layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx); filter_k_ = threadblock_offset.row() + thread_coord.strided(); Index column = threadblock_offset.column() + thread_coord.contiguous(); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { 
CUTLASS_PRAGMA_UNROLL for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { int filter_k = filter_k_ + s * ThreadMap::Delta::kStrided; int filter_c = column + c * ThreadMap::Delta::kContiguous; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < kAccessesPerVector; ++v) { uint32_t pred = ((filter_k < problem_size_.K && (filter_c + v * AccessType::kElements) < problem_size_.C) ? 1u : 0); int pred_idx = c + s * ThreadMap::Iterations::kContiguous; predicates_[v] |= (pred << pred_idx); } } } pointer_ += ( filter_k_ * params.layout.stride()[2] + column ) * sizeof_bits<Element>::value / 8; set_iteration_index(0); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(Index index) { iteration_vector_ = index % kAccessesPerVector; int residual_access = index / kAccessesPerVector; iteration_contiguous_ = residual_access % ThreadMap::Iterations::kContiguous; iteration_strided_ = residual_access / ThreadMap::Iterations::kContiguous; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } CUTLASS_HOST_DEVICE void advance() { LongIndex next = params_.inc_next_rs; // moves to the next tile ++filter_rs_; if (filter_rs_ == params_.RS) { filter_rs_ = 0; next = params_.inc_next_k; filter_k_ += params_.filter_k_delta; } // Clear predicates if needed CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { if (filter_k_ + s * ThreadMap::Delta::kStrided >= problem_size_.K) { uint32_t kClearMask = ((1u << ThreadMap::Iterations::kContiguous) - 1) << (s * ThreadMap::Iterations::kContiguous); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < kAccessesPerVector; ++v) { predicates_[v] = (predicates_[v] & (~kClearMask)); } } } pointer_ += next; } /// Returns true if the current coordinate is within the filter tensor W CUTLASS_HOST_DEVICE bool valid() { LongIndex pred_idx = iteration_contiguous_ + iteration_strided_ * ThreadMap::Iterations::kContiguous; return (predicates_[iteration_vector_] & (1u << pred_idx)); } /// Returns a pointer to the vector starting at the current coordinate CUTLASS_HOST_DEVICE AccessType const *get() const { return reinterpret_cast<AccessType const *>(pointer_ + iteration_contiguous_ * ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value / 8) + iteration_vector_; } /// Increments to the next memory access CUTLASS_HOST_DEVICE Conv2dDgradFilterTileAccessIteratorOptimized &operator++() { ++iteration_vector_; if (iteration_vector_ < kAccessesPerVector) { return *this; } iteration_vector_ = 0; ++iteration_contiguous_; if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { return *this; } iteration_contiguous_ = 0; ++iteration_strided_; if (iteration_strided_ < ThreadMap::Iterations::kStrided) { // Move to the next K coordinate within the tile pointer_ += params_.inc_next_strided; return *this; } iteration_strided_ = 0; return *this; } /// Determines whether the Implicit GEMM can execute the given problem. 
CUTLASS_HOST_DEVICE static Status can_implement(Conv2dProblemSize const &problem_size) { // check alignment constraint on iterator's contiguous dimension if (problem_size.C % AccessType::kElements) { return Status::kErrorInvalidProblem; } return Status::kSuccess; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
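/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Illustrative sketch (editor addition, not part of the original header): the alignment rule that
// both can_implement() overloads above enforce, written as a standalone host-side check.
// "vector_elements" stands for AccessType::kElements of the instantiated iterator.
//
inline bool dgrad_filter_iterator_alignment_ok(
    cutlass::conv::Conv2dProblemSize const& problem_size,
    int vector_elements) {
  // The contiguous (channel) dimension of the filter must be a multiple of the vector width,
  // otherwise the iterator cannot issue full vector accesses along C.
  return (problem_size.C % vector_elements) == 0;
}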
cutlass/include/cutlass/conv/threadblock/conv2d_dgrad_filter_tile_access_iterator_optimized.h/0
{ "file_path": "cutlass/include/cutlass/conv/threadblock/conv2d_dgrad_filter_tile_access_iterator_optimized.h", "repo_id": "cutlass", "token_count": 7198 }
20
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief A Coord is a coordinate of arbitrary rank into a tensor or matrix */ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cstdint> #else #include <stdint.h> #endif #include "cutlass/cutlass.h" namespace cutlass { //////////////////////////////////////////////////////////////////////////////////////////////////// /// Statically-sized array specifying Coords within a tensor template < int Rank_, ///< Logical rank of coordinate typename Index_ = int, ///< Index type used for each dimension typename LongIndex_ = int64_t ///< Long index type used for linear offsets > struct Coord { public: // // Type and constant definitions // /// Number of elements in Coord static int const kRank = Rank_; /// Index type used to store elements using Index = Index_; /// Type used to represent linear offsets using LongIndex = LongIndex_; private: // // Data members // /// Indices Index idx[kRank]; public: // // Methods // /// Default ctor initializes uniformly CUTLASS_HOST_DEVICE explicit Coord(Index value = Index(0)) { for (int i = 0; i < kRank; ++i) { idx[i] = value; } } /// Constructs from an array of integers CUTLASS_HOST_DEVICE Coord(Index const (&_idx)[kRank]) { for (int i = 0; i < kRank; ++i) { idx[i] = _idx[i]; } } /// Constructs from some other Coord template <int R, typename I, typename L> CUTLASS_HOST_DEVICE Coord(Coord<R, I, L> other) { for (int i = 0; i < kRank; ++i) { idx[i] = other[i]; } } /// Returns a slice of the Coord which may be larger or smaller in rank /// than this. 
template <int Slice> CUTLASS_HOST_DEVICE Coord<Slice, Index, LongIndex> slice(int start = 0, Index identity = 0) const { Coord<Slice, Index, LongIndex> result; for (int i = 0; i < Slice; ++i) { if (i + start < kRank) { result[i] = idx[i + start]; } else { result[i] = identity; } } return result; } /// Returns the index of the dimension with least value CUTLASS_HOST_DEVICE int min_dim_index() const { int i = 0; for (int j = 1; j < kRank; ++j) { if (idx[j] < idx[i]) { i = j; } } return i; } /// Returns the index of the dimension with greatest value CUTLASS_HOST_DEVICE int max_dim_index() const { int i = 0; for (int j = 1; j < kRank; ++j) { if (idx[j] > idx[i]) { i = j; } } return i; } /// Returns true if Coord is non-zero. CUTLASS_HOST_DEVICE explicit operator bool() const { for (int i = 0; i < kRank; ++i) { if (idx[i]) { return true; } } return false; } /// Returns true if Coord is uniformly zero. CUTLASS_HOST_DEVICE bool operator!() const { for (int i = 0; i < kRank; ++i) { if (idx[i]) { return false; } } return true; } /// Element-wise addition CUTLASS_HOST_DEVICE Coord operator+(Coord const& b) const { Coord c; for (int i = 0; i < kRank; ++i) { c.idx[i] = idx[i] + b.idx[i]; } return c; } /// Element-wise subtraction CUTLASS_HOST_DEVICE Coord operator-(Coord const& b) const { Coord c; for (int i = 0; i < kRank; ++i) { c.idx[i] = idx[i] - b.idx[i]; } return c; } /// Element-wise multiplication CUTLASS_HOST_DEVICE Coord operator*(Coord const& b) const { Coord c; for (int i = 0; i < kRank; ++i) { c.idx[i] = idx[i] * b.idx[i]; } return c; } /// Element-wise division CUTLASS_HOST_DEVICE Coord operator/(Coord const& b) const { Coord c; for (int i = 0; i < kRank; ++i) { c.idx[i] = idx[i] / b.idx[i]; } return c; } /// In-place addition CUTLASS_HOST_DEVICE Coord& operator+=(Coord const& b) { for (int i = 0; i < kRank; ++i) { idx[i] += b.idx[i]; } return *this; } /// In-place subtraction CUTLASS_HOST_DEVICE Coord& operator-=(Coord const& b) { for (int i = 0; i < kRank; ++i) { idx[i] -= b.idx[i]; } return *this; } /// In-place multiplication CUTLASS_HOST_DEVICE Coord& operator*=(Coord const& b) { for (int i = 0; i < kRank; ++i) { idx[i] *= b.idx[i]; } return *this; } /// In-place division CUTLASS_HOST_DEVICE Coord& operator/=(Coord const& b) { for (int i = 0; i < kRank; ++i) { idx[i] /= b.idx[i]; } return *this; } /// Member access operator CUTLASS_HOST_DEVICE Index& operator[](int dim) { return idx[dim]; } /// Member access operator CUTLASS_HOST_DEVICE Index const& operator[](int dim) const { return idx[dim]; } /// Computes the dot product with anotherCoord object CUTLASS_HOST_DEVICE LongIndex dot(Coord const& b, LongIndex sum = LongIndex(0)) const { for (int i = 0; i < kRank; ++i) { sum += idx[i] * b.idx[i]; } return sum; } /// Gets the index of a given Coord element template <int Dim> CUTLASS_HOST_DEVICE Index& at() { return idx[Dim]; } /// Access via index; may limit unrolling potential CUTLASS_HOST_DEVICE Index& at(int dim) { return idx[dim]; } /// Gets the index of a given Coord element template <int Dim> CUTLASS_HOST_DEVICE Index const& at() const { return idx[Dim]; } /// Access via index; may limit unrolling potential CUTLASS_HOST_DEVICE Index const& at(int dim) const { return idx[dim]; } /// Determines if two Coord<> objects are equal CUTLASS_HOST_DEVICE bool operator==(Coord const& b) const { bool equal = true; for (int i = 0; equal && i < kRank; ++i) { equal = (idx[i] == b.idx[i]); } return equal; } /// Not equal CUTLASS_HOST_DEVICE bool operator!=(Coord const& b) const { return !(*this 
== b); } /// Clamps a coordinate to a range specified by maximum and minimum values CUTLASS_HOST_DEVICE Coord& clamp(Coord const& max, Coord const& min = Coord()) { for (int i = 0; i < kRank; ++i) { idx[i] = __NV_STD_MAX(__NV_STD_MIN(idx[i], max.idx[i]), min.idx[i]); } return *this; } /// Returns the sum of all elements CUTLASS_HOST_DEVICE Index sum() const { Index sum_(idx[0]); for (int i = 1; i < kRank; ++i) { sum_ += idx[i]; } return sum_; } /// Returns the product of all elements CUTLASS_HOST_DEVICE LongIndex product() const { LongIndex product_(idx[0]); for (int i = 1; i < kRank; ++i) { product_ *= idx[i]; } return product_; } /// Less than operator CUTLASS_HOST_DEVICE bool operator<(Coord const &b) const { for (int i = 0; i < kRank; ++i) { if (!(idx[i] < b[i])) { return false; } } return true; } /// Less than or equals operator CUTLASS_HOST_DEVICE bool operator<=(Coord const &b) const { for (int i = 0; i < kRank; ++i) { if (!(idx[i] <= b[i])) { return false; } } return true; } /// Greater than operator CUTLASS_HOST_DEVICE bool operator>(Coord const &b) const { return !(*this <= b); } /// Greater than or equals operator CUTLASS_HOST_DEVICE bool operator>=(Coord const &b) const { return !(*this < b); } }; } // namespace cutlass //////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { /// Scalar multiplication template <int Rank, typename Index> CUTLASS_HOST_DEVICE Coord<Rank, Index> operator*(Index s, Coord<Rank, Index> coord) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Rank; ++i) { coord[i] *= s; } return coord; } /// Scalar multiplication template <int Rank, typename Index> CUTLASS_HOST_DEVICE Coord<Rank, Index> operator*(Coord<Rank, Index> coord, Index s) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Rank; ++i) { coord[i] *= s; } return coord; } /// Scalar division template <int Rank, typename Index> CUTLASS_HOST_DEVICE Coord<Rank, Index> operator/(Index s, Coord<Rank, Index> coord) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Rank; ++i) { coord[i] = s / coord[i]; } return coord; } /// Scalar division template <int Rank, typename Index> CUTLASS_HOST_DEVICE Coord<Rank, Index> operator/(Coord<Rank, Index> coord, Index s) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Rank; ++i) { coord[i] /= s; } return coord; } //////////////////////////////////////////////////////////////////////////////////////////////////// // // Integer-valued make_Coord // //////////////////////////////////////////////////////////////////////////////////////////////////// /// Helper to make a 2-element coordinate template <typename T> CUTLASS_HOST_DEVICE Coord<1, T> make_Coord(T _0) { T values[1] = {_0}; return Coord<1, T>(values); } /// Helper to make a 2-element coordinate template <typename T> CUTLASS_HOST_DEVICE Coord<2, T> make_Coord(T _0, T _1) { T values[2] = {_0, _1}; return Coord<2, T>(values); } /// Helper to make a 3-element coordinate template <typename T> CUTLASS_HOST_DEVICE Coord<3, T> make_Coord(T _0, T _1, T _2) { T values[3] = {_0, _1, _2}; return Coord<3, T>(values); } /// Helper to make a 4-element coordinate template <typename T> CUTLASS_HOST_DEVICE Coord<4, T> make_Coord(T _0, T _1, T _2, T _3) { T values[4] = {_0, _1, _2, _3}; return Coord<4, T>(values); } /// Helper to make a 5-element coordinate template <typename T> CUTLASS_HOST_DEVICE Coord<5, T> make_Coord(T _0, T _1, T _2, T _3, T _4) { T values[5] = {_0, _1, _2, _3, _4}; return Coord<5, T>(values); } /// Helper to make a 1-element coordinate template <int N, typename T> 
CUTLASS_HOST_DEVICE
Coord<N, T> make_Coord_with_padding(T _0) {
  Coord<N, T> coord;

  CUTLASS_PRAGMA_UNROLL
  for (int i = N - 1; i > 0; --i) {
    coord[i] = 0;
  }

  coord[0] = _0;

  return coord;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

} // namespace cutlass
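// --- Editorial example (not part of the original coord.h): a minimal host-side usage sketch of
// cutlass::Coord and make_Coord, based only on the interface defined above. It assumes the CUTLASS
// include directory is on the compiler's include path; the values and variable names are
// illustrative only.

#include <cstdint>
#include <iostream>

#include "cutlass/coord.h"

int main() {
  cutlass::Coord<3> a = cutlass::make_Coord(2, 3, 4);
  cutlass::Coord<3> b = cutlass::make_Coord(1, 1, 1);

  cutlass::Coord<3> c = a + b;            // element-wise addition -> (3, 4, 5)
  int64_t dot = a.dot(b);                 // 2*1 + 3*1 + 4*1 = 9
  int64_t vol = a.product();              // 2*3*4 = 24

  a.clamp(cutlass::make_Coord(3, 3, 3));  // clamps each index in place -> (2, 3, 3)

  std::cout << "c = (" << c[0] << ", " << c[1] << ", " << c[2] << "), "
            << "dot = " << dot << ", product = " << vol << std::endl;
  return 0;
}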
cutlass/include/cutlass/coord.h/0
{ "file_path": "cutlass/include/cutlass/coord.h", "repo_id": "cutlass", "token_count": 4608 }
21
/***************************************************************************************************
 * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/
/*! \file
  \brief Functor for performing tensor-tensor broadcasts atop existing epilogues.
Concretely, the opeartion performed is the following: UnaryOp( BinaryOp1( BinaryOp0( Activation((alpha * A @ B) + bias), beta * C0 ), beta * C1 ) ) where: - C0 and C1 have the same extents as the output - BinaryOp0 and BinaryOp1 perform elementwise binary operations - UnaryOp is an elementwise operation */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/epilogue/collective/detail.hpp" #include "cute/tensor.hpp" #include "cutlass/cuda_host_adapter.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace collective { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Collective epilogue that applies elementwise tensor-tensor operations atop other epilogues /// template < class StrideC_, class StrideD_, class ThreadEpilogueOp_, class EpilogueSchedule_, bool PerColumnBias_ = false > class EpilogueTensorBroadcast { public: // // Type Aliases // using EpilogueSchedule = EpilogueSchedule_; // derived types of output thread level operator using ThreadEpilogueOp = ThreadEpilogueOp_; using ElementOutput = typename ThreadEpilogueOp::ElementOutput; using ElementAccumulator = typename ThreadEpilogueOp::ElementAccumulator; using ElementCompute = typename ThreadEpilogueOp::ElementCompute; using ElementScalar = ElementCompute; using ElementBias = typename ThreadEpilogueOp::ElementBias; using ElementC = typename ThreadEpilogueOp::ElementC; using StrideC = StrideC_; using ElementD = typename ThreadEpilogueOp::ElementD; using StrideD = StrideD_; using ActivationFunctor = typename ThreadEpilogueOp::ActivationFunctor; static_assert(cute::rank(StrideC{}) == 3, "StrideCD must be rank-3: [M, N, L]"); static_assert(cute::rank(StrideD{}) == 3, "StrideCD must be rank-3: [M, N, L]"); static constexpr int kOutputAlignment = ThreadEpilogueOp::kCount; using AlignmentType = typename cute::uint_bit<sizeof_bits<ElementOutput>::value * kOutputAlignment>::type; static constexpr bool IsBinaryOp0Enabled = ThreadEpilogueOp::IsBinaryOp0Enabled; static constexpr bool IsBinaryOp1Enabled = ThreadEpilogueOp::IsBinaryOp1Enabled; static constexpr bool IsUnaryOpEnabled = ThreadEpilogueOp::IsUnaryOpEnabled; static constexpr bool PerColumnBias = PerColumnBias_; using BiasStride = typename cute::conditional_t<PerColumnBias, Stride<_0, _1, _0>, Stride<_1, _0, _0>>; struct SharedStorage { }; // Host side epilogue arguments struct Arguments { typename ThreadEpilogueOp::Params thread{}; StrideC dC{}; ElementD* ptr_D = nullptr; StrideD dD{}; ElementBias* ptr_Bias = nullptr; ElementC* ptr_C0 = nullptr; ElementC* ptr_C1 = nullptr; }; // Device side epilogue params using Params = Arguments; // // Methods // template <class ProblemShape> static constexpr Params to_underlying_arguments( [[maybe_unused]] ProblemShape const& _, Arguments const& args, [[maybe_unused]] void* workspace) { return args; } template <class ProblemShape> static size_t get_workspace_size(ProblemShape const& problem_shape, Arguments const& args) { return 0; } template <class ProblemShape> static cutlass::Status initialize_workspace(ProblemShape const& problem_shape, Arguments const& args, void* workspace, cudaStream_t stream, CudaHostAdapter* cuda_adapter = nullptr) { return cutlass::Status::kSuccess; } template <class ProblemShape> CUTLASS_HOST_DEVICE static bool can_implement( [[maybe_unused]] ProblemShape const& problem_shape, [[maybe_unused]] Arguments const& args) { return true; } CUTLASS_HOST_DEVICE 
EpilogueTensorBroadcast(Params const& params_) : params(params_), epilogue_op(params_.thread) { } CUTLASS_DEVICE bool is_source_needed() { return epilogue_op.is_source0_needed() || epilogue_op.is_source1_needed(); } template< class ProblemShapeMNKL, class BlockShapeMNK, class BlockCoordMNKL, class FrgEngine, class FrgLayout, class TiledMma, class ResidueMNK > CUTLASS_HOST_DEVICE void operator()( ProblemShapeMNKL problem_shape_mnkl, BlockShapeMNK blk_shape_MNK, BlockCoordMNKL blk_coord_mnkl, cute::Tensor<FrgEngine, FrgLayout> const& accumulators, TiledMma tiled_mma, ResidueMNK residue_mnk, int thread_idx, [[maybe_unused]] char* smem_buf) { using namespace cute; using X = Underscore; static_assert(cute::rank(ProblemShapeMNKL{}) == 4, "ProblemShapeMNKL must be rank 4"); static_assert(is_static<BlockShapeMNK>::value, "ThreadBlock tile shape must be static"); static_assert(cute::rank(BlockShapeMNK{}) == 3, "BlockShapeMNK must be rank 3"); static_assert(cute::rank(BlockCoordMNKL{}) == 4, "BlockCoordMNKL must be rank 4"); // Separate out problem shape for convenience auto M = get<0>(problem_shape_mnkl); auto N = get<1>(problem_shape_mnkl); auto L = get<3>(problem_shape_mnkl); auto stride_c = detail::get_epilogue_stride<EpilogueSchedule>(params.dC); auto stride_d = detail::get_epilogue_stride<EpilogueSchedule>(params.dD); auto stride_bias = detail::get_epilogue_stride<EpilogueSchedule>(BiasStride{}); // Represent the full output tensor Tensor mC0_mnl = make_tensor(make_gmem_ptr(params.ptr_C0), make_shape(M,N,L), stride_c); // (m,n,l) Tensor mC1_mnl = make_tensor(make_gmem_ptr(params.ptr_C1), make_shape(M,N,L), stride_c); // (m,n,l) Tensor mD_mnl = make_tensor(make_gmem_ptr(params.ptr_D), make_shape(M,N,L), stride_d); // (m,n,l) Tensor mBias_mnl = make_tensor(make_gmem_ptr(params.ptr_Bias), make_shape(M,N,L), stride_bias); // (m,n,l) Tensor gC0_mnl = local_tile(mC0_mnl, blk_shape_MNK, make_coord(_,_,_), Step<_1,_1, X>{}); // (BLK_M,BLK_N,m,n,l) Tensor gC1_mnl = local_tile(mC1_mnl, blk_shape_MNK, make_coord(_,_,_), Step<_1,_1, X>{}); // (BLK_M,BLK_N,m,n,l) Tensor gD_mnl = local_tile(mD_mnl, blk_shape_MNK, make_coord(_,_,_), Step<_1,_1, X>{}); // (BLK_M,BLK_N,m,n,l) Tensor gBias_mnl = local_tile(mBias_mnl, blk_shape_MNK, make_coord(_,_,_), Step<_1,_1, X>{}); // (BLK_M,BLK_N,m,n,l) // Slice to get the tile this thread block is responsible for auto [m_coord, n_coord, k_coord, l_coord] = blk_coord_mnkl; Tensor gC0 = gC0_mnl(_,_,m_coord,n_coord,l_coord); // (BLK_M,BLK_N) Tensor gC1 = gC1_mnl(_,_,m_coord,n_coord,l_coord); // (BLK_M,BLK_N) Tensor gD = gD_mnl(_,_,m_coord,n_coord,l_coord); // (BLK_M,BLK_N) Tensor gBias = gBias_mnl(_,_,m_coord,n_coord,l_coord); // (BLK_M,BLK_N) // Partition source and destination tiles to match the accumulator partitioning auto thr_mma = tiled_mma.get_thread_slice(thread_idx); Tensor tCgD = thr_mma.partition_C(gD); // (VEC,THR_M,THR_N) Tensor tCgC0 = thr_mma.partition_C(gC0); // (VEC,THR_M,THR_N) Tensor tCgC1 = thr_mma.partition_C(gC1); // (VEC,THR_M,THR_N) Tensor tCgBias = thr_mma.partition_C(gBias); // (VEC,THR_M,THR_N) static_assert(is_static<FrgLayout>::value, "Accumulator layout must be static"); CUTE_STATIC_ASSERT_V(size(tCgC0) == size(tCgD), "Source and destination must have the same number of elements."); CUTE_STATIC_ASSERT_V(size(tCgC1) == size(tCgD), "Source and destination must have the same number of elements."); CUTE_STATIC_ASSERT_V(size(tCgD) == size(accumulators), "Accumulator count must have the same destination element count."); 
CUTE_STATIC_ASSERT_V(size(tCgBias) == size(accumulators), "Accumulator count must have the same destination element count."); auto cD = make_identity_tensor(make_shape(unwrap(shape<0>(gD)), unwrap(shape<1>(gD)))); Tensor tCcD = thr_mma.partition_C(cD); bool bias_needed = params.ptr_Bias != nullptr; bool c0_needed = (params.ptr_C0 != nullptr) && epilogue_op.is_source0_needed(); bool c1_needed = (params.ptr_C1 != nullptr) && epilogue_op.is_source1_needed(); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < size(accumulators); ++i) { if (elem_less(tCcD(i), make_coord(get<0>(residue_mnk), get<1>(residue_mnk)))) { ElementBias bias = bias_needed ? tCgBias(i) : ElementBias(0); ElementC c0 = c0_needed ? tCgC0(i) : ElementC(0); ElementC c1 = c1_needed ? tCgC1(i) : ElementC(0); tCgD(i) = epilogue_op(accumulators(i), c0, c1, bias); } } } private: Params params; ThreadEpilogueOp epilogue_op; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace collective } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
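// --- Editorial example (not part of the original epilogue_tensor_broadcast.hpp): a scalar,
// host-side reference of the computation described in the file comment above, written out to make
// the operator nesting explicit. The concrete operator choices here (ReLU for Activation, '+' for
// both binary ops, identity for the unary op) are illustrative assumptions; the collective accepts
// arbitrary functors through ThreadEpilogueOp.

#include <algorithm>
#include <cstdio>

namespace example {

// One output element:
//   UnaryOp(BinaryOp1(BinaryOp0(Activation(alpha * acc + bias), beta * c0), beta * c1))
inline float tensor_broadcast_reference(
    float acc,    // accumulator element, i.e. one element of (A @ B)
    float c0,     // element of the first broadcast source C0
    float c1,     // element of the second broadcast source C1
    float bias,   // bias element
    float alpha,
    float beta) {

  float activated = std::max(alpha * acc + bias, 0.0f);  // Activation := ReLU (illustrative)
  float after_op0 = activated + beta * c0;               // BinaryOp0  := plus
  float after_op1 = after_op0 + beta * c1;               // BinaryOp1  := plus
  return after_op1;                                      // UnaryOp    := identity
}

} // namespace example

int main() {
  // alpha = beta = 1: ReLU(2.0 + 0.5) + 0.25 + 0.25 = 3.0
  float d = example::tensor_broadcast_reference(2.0f, 0.25f, 0.25f, 0.5f, 1.0f, 1.0f);
  std::printf("D = %f\n", d);
  return 0;
}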
cutlass/include/cutlass/epilogue/collective/epilogue_tensor_broadcast.hpp/0
{ "file_path": "cutlass/include/cutlass/epilogue/collective/epilogue_tensor_broadcast.hpp", "repo_id": "cutlass", "token_count": 4602 }
22
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing linear combination operations used by epilogues. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" #include "cutlass/platform/platform.h" #include "cutlass/epilogue/thread/activation.h" #include "cutlass/epilogue/thread/scale_type.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// // If kIsHeavy is a member, use it. Otherwise, assume that it's false. 
namespace { // (anonymous) template<class Op, class Enable = void> struct kIsHeavy_member_or_false { static constexpr bool value = false; }; template<class Op> struct kIsHeavy_member_or_false<Op, typename cutlass::platform::enable_if<Op::kIsHeavy>::type> { static constexpr bool value = Op::kIsHeavy; }; } // namespace (anonymous) ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { struct EmptyArguments {}; template<class T, class = void> struct ElementwiseOpDispatcher { using Arguments = EmptyArguments; T op; CUTLASS_HOST_DEVICE ElementwiseOpDispatcher(Arguments) {} template <typename ValueType> CUTLASS_HOST_DEVICE ValueType operator()(ValueType value) { return op(value); } }; template<class T> struct ElementwiseOpDispatcher<T, std::void_t<typename T::Arguments>> { using Arguments = typename T::Arguments; Arguments args; T op; CUTLASS_HOST_DEVICE ElementwiseOpDispatcher(Arguments args_):args(args_) {} template <typename ValueType> CUTLASS_HOST_DEVICE ValueType operator()(ValueType value) { return op(value, args); } }; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// This base class is meant to define the concept required of the /// EpilogueWithBroadcast::OutputOp template < typename ElementC_, typename ElementAccumulator_, typename ElementCompute_, typename ElementZ_, typename ElementT_, int ElementsPerAccess, typename ElementwiseOp_ = Identity<ElementCompute_>, typename BinaryOp_ = plus<ElementCompute_>, bool StoreT_ = true, typename ElementVector_ = ElementC_ > class LinearCombinationBiasElementwise { public: using ElementOutput = ElementC_; using ElementC = ElementC_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementCompute_; using ElementZ = ElementZ_; using ElementT = ElementT_; using ElementVector = ElementVector_; static int const kElementsPerAccess = ElementsPerAccess; static int const kCount = kElementsPerAccess; using ElementwiseOp = ElementwiseOp_; using BinaryOp = BinaryOp_; using ElementwiseOpDispatcher = detail::ElementwiseOpDispatcher<ElementwiseOp>; using ElementwiseArguments = typename ElementwiseOpDispatcher::Arguments; // Indicates that this epilogue applies only one binary operation static bool const kIsSingleSource = true; using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>; using FragmentCompute = Array<ElementCompute, kElementsPerAccess>; using FragmentC = Array<ElementC, kElementsPerAccess>; using FragmentZ = Array<ElementZ, kElementsPerAccess>; using FragmentT = Array<ElementT, kElementsPerAccess>; // Definitions needed for collective epilogue using FragmentSource = FragmentC; using FragmentOutput = FragmentZ; using ElementBias = ElementVector; using FragmentBias = Array<ElementBias, kElementsPerAccess>; using ActivationFunctor = ElementwiseOp; static const ScaleType::Kind kScale = ScaleType::Default; static bool const kIsHeavy = kIsHeavy_member_or_false<ElementwiseOp>::value; /// If true, the 'Z' tensor is stored static bool const kStoreZ = true; /// If true, the 'T' tensor is stored static bool const kStoreT = StoreT_; /// Host-constructable parameters structure struct Params { ElementCompute alpha; ///< scales accumulators ElementCompute beta; ///< scales source tensor ElementCompute const *alpha_ptr; ///< pointer to accumulator scalar - if not null, loads it from memory ElementCompute const *beta_ptr; ///< pointer to source scalar - if not null, loads it from memory ElementwiseArguments 
elementwise; ///< Arguments for elementwise operation // // Methods // CUTLASS_HOST_DEVICE Params(): alpha(ElementCompute(1)), beta(ElementCompute(0)), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha, ElementCompute beta, ElementwiseArguments elementwise_ = ElementwiseArguments{} ): alpha(alpha), beta(beta), alpha_ptr(nullptr), beta_ptr(nullptr), elementwise(elementwise_) { } CUTLASS_HOST_DEVICE Params( ElementCompute alpha ): alpha(alpha), beta(0), alpha_ptr(nullptr), beta_ptr(nullptr) { } CUTLASS_HOST_DEVICE Params( ElementCompute const *alpha_ptr, ElementCompute const *beta_ptr, ElementwiseArguments elementwise_ = ElementwiseArguments{} ): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(beta_ptr), elementwise(elementwise_) { } CUTLASS_HOST_DEVICE Params( ElementCompute const *alpha_ptr ): alpha(0), beta(0), alpha_ptr(alpha_ptr), beta_ptr(nullptr) { } }; private: // // Data members // ElementCompute alpha_; ElementCompute beta_; ElementwiseArguments const &elementwise_; bool skip_elementwise_; public: // // Methods // /// Constructor from Params CUTLASS_HOST_DEVICE LinearCombinationBiasElementwise(Params const &params): elementwise_(params.elementwise) { alpha_ = (params.alpha_ptr ? *params.alpha_ptr : params.alpha); beta_ = (params.beta_ptr ? *params.beta_ptr : params.beta); skip_elementwise_ = false; } /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { return beta_ != ElementCompute(0); } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) { if (k_partition) { beta_ = ElementCompute(1); } if (k_partition != k_partition_count - 1) { skip_elementwise_ = true; } } /// Applies the operation when elementwise_op require arguments and is_source_needed() is true template <typename ElementwiseArgs> CUTLASS_HOST_DEVICE void operator()( FragmentZ &frag_Z, FragmentT &frag_T, FragmentAccumulator const &AB, FragmentC const &frag_C, FragmentCompute const &V, ElementwiseArgs const &elementwise_args) const { ElementwiseOp elementwise_op; BinaryOp binary_op; FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB); FragmentCompute tmp_C = NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(frag_C); FragmentCompute result_Z; FragmentCompute result_T; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kElementsPerAccess; ++i) { ElementCompute z = binary_op(alpha_ * tmp_Accum[i] + beta_ * tmp_C[i], V[i]); result_T[i] = z; result_Z[i] = skip_elementwise_ ? 
z : elementwise_op(z, elementwise_args); } NumericArrayConverter<ElementZ, ElementCompute, kElementsPerAccess> convert_z; frag_Z = convert_z(result_Z); if constexpr (kStoreT) { NumericArrayConverter<ElementT, ElementCompute, kElementsPerAccess> convert_t; frag_T = convert_t(result_T); } } /// Applies the operation when elementwise_op require arguments and is_source_needed() is false template <typename ElementwiseArgs> CUTLASS_HOST_DEVICE void operator()( FragmentZ &frag_Z, FragmentT &frag_T, FragmentAccumulator const &AB, FragmentCompute const &V, ElementwiseArgs const &elementwise_args) const { ElementwiseOp elementwise_op; BinaryOp binary_op; FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB); FragmentCompute result_Z; FragmentCompute result_T; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kElementsPerAccess; ++i) { ElementCompute z = binary_op(alpha_ * tmp_Accum[i], V[i]); result_T[i] = z; result_Z[i] = skip_elementwise_ ? z : elementwise_op(z, elementwise_args); } NumericArrayConverter<ElementZ, ElementCompute, kElementsPerAccess> convert_z; frag_Z = convert_z(result_Z); if constexpr (kStoreT) { NumericArrayConverter<ElementT, ElementCompute, kElementsPerAccess> convert_t; frag_T = convert_t(result_T); } } /// Applies the operation when is_source_needed() is true CUTLASS_HOST_DEVICE void operator()( FragmentZ &frag_Z, FragmentT &frag_T, FragmentAccumulator const &AB, FragmentC const &frag_C, FragmentCompute const &V) const { ElementwiseOpDispatcher elementwise_op(elementwise_); BinaryOp binary_op; FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB); FragmentCompute tmp_C = NumericArrayConverter<ElementCompute, ElementC, kElementsPerAccess>()(frag_C); FragmentCompute result_Z; FragmentCompute result_T; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kElementsPerAccess; ++i) { ElementCompute z = binary_op(alpha_ * tmp_Accum[i] + beta_ * tmp_C[i], V[i]); result_T[i] = z; result_Z[i] = skip_elementwise_ ? z : elementwise_op(z); } NumericArrayConverter<ElementZ, ElementCompute, kElementsPerAccess> convert_z; frag_Z = convert_z(result_Z); if constexpr (kStoreT) { NumericArrayConverter<ElementT, ElementCompute, kElementsPerAccess> convert_t; frag_T = convert_t(result_T); } } /// Applies the operation when is_source_needed() is false CUTLASS_HOST_DEVICE void operator()( FragmentZ &frag_Z, FragmentT &frag_T, FragmentAccumulator const &AB, FragmentCompute const &V) const { ElementwiseOpDispatcher elementwise_op(elementwise_); BinaryOp binary_op; FragmentCompute tmp_Accum = NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB); FragmentCompute result_Z; FragmentCompute result_T; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kElementsPerAccess; ++i) { ElementCompute z = binary_op(alpha_ * tmp_Accum[i], V[i]); result_T[i] = z; result_Z[i] = skip_elementwise_ ? z : elementwise_op(z); } NumericArrayConverter<ElementZ, ElementCompute, kElementsPerAccess> convert_z; frag_Z = convert_z(result_Z); if constexpr (kStoreT) { NumericArrayConverter<ElementT, ElementCompute, kElementsPerAccess> convert_t; frag_T = convert_t(result_T); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/thread/linear_combination_bias_elementwise.h/0
{ "file_path": "cutlass/include/cutlass/epilogue/thread/linear_combination_bias_elementwise.h", "repo_id": "cutlass", "token_count": 4484 }
23
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using WMMA. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/gemm/gemm.h" #include "cutlass/epilogue/thread/linear_combination.h" #include "cutlass/epilogue/thread/linear_combination_clamp.h" #include "cutlass/epilogue/thread/linear_combination_relu.h" #include "cutlass/epilogue/thread/linear_combination_gelu.h" #include "cutlass/epilogue/thread/linear_combination_sigmoid.h" #include "cutlass/epilogue/thread/linear_combination_planar_complex.h" #include "cutlass/epilogue/thread/conversion_op.h" #include "cutlass/epilogue/thread/reduction_op.h" #include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h" #include "cutlass/epilogue/warp/fragment_iterator_wmma_tensor_op.h" #include "cutlass/epilogue/warp/tile_iterator_wmma_tensor_op.h" #include "cutlass/epilogue/threadblock/default_thread_map_wmma_tensor_op.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" #include "cutlass/epilogue/threadblock/shared_load_iterator.h" #include "cutlass/epilogue/threadblock/epilogue.h" #include "cutlass/layout/permute.h" //////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { //////////////////////////////////////////////////////////////////////////////// /// Defines sensible defaults for epilogues for WMMA TensorOps. 
template < typename Shape_, typename WarpMmaTensorOp_, int PartitionsK, typename OutputOp_, int ElementsPerAccess, bool ScatterD = false, typename PermuteDLayout = layout::NoPermute > struct DefaultEpilogueWmmaTensorOp { using Shape = Shape_; using WarpMmaTensorOp = WarpMmaTensorOp_; static int const kPartitionsK = PartitionsK; using OutputOp = OutputOp_; static int const kElementsPerAccess = ElementsPerAccess; using ElementOutput = typename OutputOp::ElementOutput; using LayoutC = typename WarpMmaTensorOp::LayoutC; using ElementAccumulator = typename WarpMmaTensorOp::ElementC; // // Thread map // using OutputTileThreadMap = typename cutlass::epilogue::threadblock::DefaultThreadMapWmmaTensorOp< Shape, typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, kPartitionsK, ElementOutput, kElementsPerAccess >::Type; using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIterator< OutputTileThreadMap, ElementOutput, ScatterD, PermuteDLayout >; using AccumulatorFragmentIterator = cutlass::epilogue::warp::FragmentIteratorWmmaTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::ElementC, typename WarpMmaTensorOp::Policy::Operator::FragmentC, LayoutC >; using WarpTileIterator = cutlass::epilogue::warp::TileIteratorWmmaTensorOp< typename WarpMmaTensorOp::Shape, typename WarpMmaTensorOp::Policy::Operator::Shape, typename WarpMmaTensorOp::Policy::Operator::FragmentC, LayoutC >; using SharedLoadIterator = cutlass::epilogue::threadblock::SharedLoadIterator< typename OutputTileThreadMap::CompactedThreadMap, ElementAccumulator >; /// Hard-coded padding elements added using Padding = typename WarpTileIterator::Padding; // // Define the epilogue // using Epilogue = cutlass::epilogue::threadblock::Epilogue< Shape, WarpMmaTensorOp, kPartitionsK, OutputTileIterator, AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator, OutputOp, Padding >; }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h/0
{ "file_path": "cutlass/include/cutlass/epilogue/threadblock/default_epilogue_wmma_tensor_op.h", "repo_id": "cutlass", "token_count": 1811 }
24
/*************************************************************************************************** * Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Threadblock-level epilogue computing: Aux = ((alpha * scale_a * scale_b) * accumulator) + ((beta * scale_c) * source) + bias D = activation(Aux) if Aux is fp8 type: abs_max_output = max( abs(aux) | (for every aux in Aux)) Aux = scale_aux * Aux endif if D is fp8 type: abs_max_output = max( abs(d) | (for every d in D)) D = scale_d * D endif Parameter Aux is optionally stored to global memory */ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #include <cuda/std/utility> #else #include <assert.h> #include <utility> #endif #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/numeric_types.h" #include "cutlass/numeric_conversion.h" #include "cutlass/tensor_coord.h" #include "cutlass/aligned_buffer.h" #include "cutlass/functional.h" #include "cutlass/fast_math.h" #include "cutlass/layout/vector.h" #include "cutlass/layout/tensor.h" #include "cutlass/gemm/gemm.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/regular_tile_iterator.h" #include "cutlass/epilogue/threadblock/epilogue_base.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" #include "cutlass/numeric_types.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { /// Helper class for keeping track of absolute maximums and performing scaling template < typename Iterator, // Iterator type used for storing the data for which absolute maximum and scaling // will be computed. This type is used for predicating absolute maximum calculations. 
typename Fragment, // Type of input to be computed on bool ScalingAndAmaxNeeded // Whether to perform absolute maximum and scaling operations > struct ScalingAndAmaxHelper; /// Partial specialization that does not perform scaling or calculate an absolute maximum template <typename Iterator, typename Fragment> struct ScalingAndAmaxHelper<Iterator, Fragment, false> { using Element = typename Fragment::Element; CUTLASS_HOST_DEVICE ScalingAndAmaxHelper(Element scale) { } CUTLASS_DEVICE Fragment operator()(const Iterator& iterator, const Fragment& inp) { return inp; } CUTLASS_HOST_DEVICE Element get_abs_max() const { return Element(0.); } CUTLASS_HOST_DEVICE void set_scaling_factor(Element scale_) { } }; /// Partial specialization that keeps track of an absolute maximum value of inputs seen /// and scales inputs template <typename Iterator, typename Fragment> struct ScalingAndAmaxHelper<Iterator, Fragment, true> { using Element = typename Fragment::Element; using AccessType = typename Iterator::AccessType; using ThreadMap = typename Iterator::ThreadMap; Element abs_max; Element scale; // Operators maximum_with_nan_propogation<Element> max_op; absolute_value_op<Element> abs_op; multiplies<Fragment> multiply; CUTLASS_HOST_DEVICE ScalingAndAmaxHelper(Element scale_) : abs_max(0.), scale(scale_) { } // Compute the absolute maximum value between `abs_max` and the entries // of `frag` for predicated-on entries of `iterator`. Return a scaled // version of `inp`. CUTLASS_DEVICE Fragment operator()(const Iterator& iterator, const Fragment& frag) { using PredicateGroup = Array<Element, Iterator::ThreadMap::kElementsPerAccess>; PredicateGroup const *frag_ptr = reinterpret_cast<PredicateGroup const *>(&frag); typename Iterator::Mask mask; iterator.get_mask(mask); CUTLASS_PRAGMA_UNROLL for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { CUTLASS_PRAGMA_UNROLL for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { CUTLASS_PRAGMA_UNROLL for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { int frag_row_idx = (row + ThreadMap::Iterations::kRow * (group + ThreadMap::Iterations::kGroup * cluster)); int row_offset = row * ThreadMap::Delta::kRow + group * ThreadMap::Delta::kGroup + cluster * ThreadMap::Delta::kCluster; bool row_guard = ((row_offset + iterator.thread_start_row()) < iterator.extent_row()); CUTLASS_PRAGMA_UNROLL for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { bool guard = row_guard && mask.predicates[column]; if (guard) { int access_idx = frag_row_idx * ThreadMap::Iterations::kColumn + column; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < PredicateGroup::kElements; ++i) { abs_max = max_op(abs_max, abs_op(frag_ptr[access_idx][i])); } } } } } } // Perform scaling return multiply(scale, frag); } CUTLASS_HOST_DEVICE Element get_abs_max() const { return abs_max; } CUTLASS_HOST_DEVICE void set_scaling_factor(Element scale_) { scale = scale_; } }; } // namespace detail ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Shape_, ///< Shape of threadblock tile (concept: GemmShape) typename WarpMmaOperator_, ///< Warp-level MMA operator (concept: gemm::warp::MmaTensorOp) int PartitionsK, ///< Number of partitions of the K dimension typename OutputTileIterator_, ///< Tile iterator reading and writing output tensors typename AuxOutputTileIterator_, ///< Tile iterator writing auxiliary output tensors typename ElementVector_, ///< Data type of bias vector typename 
AccumulatorFragmentIterator_, ///< Fragment iterator selecting accumulators typename WarpTileIterator_, ///< Warp-scoped tile iterator writing accumulators to SMEM typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading from SMEM typename OutputOp_, ///< Output operator typename Padding_, ///< Padding added to SMEM allocation to avoid bank conflicts (concept: MatrixShape) int FragmentsPerPartition = 1, ///< Used to coarsen the epilogue granularity int IterationsUnroll = ///< Used to reduce binary size when epilogue op is large (!IsEpilogueFunctorHeavy<OutputOp_>::value) > class EpilogueWithAbsMax : public EpilogueBase< Shape_, typename WarpMmaOperator_::Shape, PartitionsK, AccumulatorFragmentIterator_, WarpTileIterator_, Padding_, FragmentsPerPartition> { public: using Base = EpilogueBase< Shape_, typename WarpMmaOperator_::Shape, PartitionsK, AccumulatorFragmentIterator_, WarpTileIterator_, Padding_, FragmentsPerPartition>; static bool const kIsSingleSource = true; using Shape = Shape_; using WarpMmaOperator = WarpMmaOperator_; static int const kPartitionsK = PartitionsK; using OutputTileIterator = OutputTileIterator_; using AuxOutputTileIterator = AuxOutputTileIterator_; using ElementVector = ElementVector_; using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; using WarpTileIterator = WarpTileIterator_; using SharedLoadIterator = SharedLoadIterator_; using OutputOp = OutputOp_; using Padding = Padding_; using Layout = layout::RowMajor; using LongIndex = typename Layout::LongIndex; /// The complete warp-level accumulator tile using AccumulatorTile = typename Base::AccumulatorTile; /// Accumulator element using ElementAccumulator = typename WarpTileIterator::Element; /// Data type used for absolute maximum value using ElementAbsmax = typename OutputOp::ElementAbsmax; /// Compute data type produced by the output op using ElementCompute = typename OutputOp::ElementCompute; /// Compute fragment using FragmentCompute = Array<ElementCompute, OutputTileIterator::Fragment::kElements>; /// Helpers for (optionally) computing absolute maximums and scaling output and auxiliary output using OutputScaler = detail::ScalingAndAmaxHelper<OutputTileIterator, FragmentCompute, OutputOp::kIsScalingAndAmaxOutputNeeded>; using AuxOutputScaler = detail::ScalingAndAmaxHelper<AuxOutputTileIterator, FragmentCompute, OutputOp::kIsScalingAndAmaxAuxOutputNeeded>; /// Thread map used by output tile iterators using ThreadMap = typename OutputTileIterator::ThreadMap; /// Fragment object used to store the broadcast values using BroadcastFragment = Array< ElementCompute, ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess>; /// Output element using ElementOutput = typename OutputTileIterator::Element; /// Data type of auxiliary output using ElementAuxOutput = typename AuxOutputTileIterator::Element; /// Output access size static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess; /// Tensor reference to destination tensor using TensorRef = typename OutputTileIterator::TensorRef; /// Tensor reference to sync tensor using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>; /// Const tensor reference to source tensor using ConstTensorRef = typename OutputTileIterator::ConstTensorRef; /// Array type used to output using OutputAccessType = Array< typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>; /// Array type used by output functor using AccumulatorAccessType = Array<typename WarpTileIterator::Element, 
OutputTileIterator::kElementsPerAccess>; /// Array type used by output functor using ComputeAccessType = Array<ElementCompute, OutputTileIterator::kElementsPerAccess>; /// Auxiliary output access type using AuxAccessType = Array<ElementAuxOutput, OutputTileIterator::kElementsPerAccess>; /// Number of warps using WarpCount = typename Base::WarpCount; /// Shared memory allocation from epilogue base class using BaseSharedStorage = typename Base::SharedStorage; static int constexpr kSmemTiles = Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK; static int constexpr kSmemPointerOffset = Base::SharedStorage::StorageShape::kCount / kSmemTiles; /// Used for the broadcast struct BroadcastDetail { /// Number of threads per warp static int const kWarpSize = 32; static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; /// Number of distinct scalar column indices handled by each thread static int const kColumnsPerThread = ThreadMap::Iterations::kColumn * ThreadMap::kElementsPerAccess; /// Number of distinct scalar row indices handled by each thread static int const kRowsPerThread = ThreadMap::Iterations::kCount / ThreadMap::Iterations::kColumn; /// Number of threads per threadblock static int const kThreadCount = kWarpSize * WarpCount::kCount; /// Number of distinct threads per row of output tile static int const kThreadsPerRow = (Shape::kN / kColumnsPerThread); /// Number of distinct threads which must be reduced during the final reduction phase within the threadblock. static int const kThreadRows = kThreadCount / kThreadsPerRow; /// I'm not sure what I meant here. static int const kThreadAccessesPerRow = const_max(1, (Shape::kN + kThreadCount - 1) / kThreadCount); /// Shape of the shared memory allocation for the epilogue using StorageShape = MatrixShape< kThreadRows, Shape::kN >; /// Debug printing CUTLASS_DEVICE static void print() { #if 0 printf("BroadcastDetail {\n"); printf( " kColumnsPerThread: %d\nkRowsPerThread: %d\n,kThreadCount: %d\nkThreadsPerRow: %d\n" "kThreadRows: %d\nThreadAccessesPerRow: %d\nStorageShape: %d x %d (count: %d)\n", kColumnsPerThread, kRowsPerThread, kThreadCount, kThreadsPerRow, kThreadRows, kThreadAccessesPerRow, StorageShape::kRow, StorageShape::kColumn, StorageShape::kCount ); printf("};\n"); #endif } }; /// Shared storage structure (shadows base) with additional SMEM buffer for reduction struct SharedStorage { union { BaseSharedStorage base; }; CUTLASS_HOST_DEVICE SharedStorage() { } }; public: static_assert(SharedLoadIterator::Fragment::kElements == OutputTileIterator::Fragment::kElements, "Mismatch between shared load iterator and output tile iterator."); static_assert(OutputTileIterator::kElementsPerAccess, "OutputTileIterator::kElementsPerAccess must not be zero."); static_assert(!(OutputTileIterator::Fragment::kElements % OutputTileIterator::kElementsPerAccess), "Divisibility"); private: /// Loads fragment from shared memory aligned with output tensor SharedLoadIterator shared_load_iterator_; /// Thread index within the threadblock int thread_idx_; public: /// Constructor CUTLASS_DEVICE EpilogueWithAbsMax( SharedStorage &shared_storage, ///< Shared storage object int thread_idx, ///< ID of a thread within the threadblock int warp_idx, ///< ID of warp within threadblock int lane_idx ///< Id of thread within warp ): Base(shared_storage.base, thread_idx, warp_idx, lane_idx), shared_load_iterator_(shared_storage.base.reference(), thread_idx), thread_idx_(thread_idx) { } /// Streams the result to global memory CUTLASS_DEVICE 
void operator()( OutputOp &output_op, ///< Output operator ElementVector const * broadcast_ptr, ///< Broadcast vector OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator, ///< Tile iterator for source accumulator matrix AuxOutputTileIterator aux_iterator, ///< Tile iterator for destination auxiliary output MatrixCoord const &problem_size = ///< Problem size needed to guard against out-of-bounds accesses MatrixCoord(Shape::kM, Shape::kN), MatrixCoord const &threadblock_offset = ///< Threadblock's initial offset within the problem size space MatrixCoord()) { BroadcastFragment broadcast_fragment; load_broadcast_fragment_(broadcast_fragment, broadcast_ptr, problem_size, threadblock_offset); OutputScaler output_scaler(output_op.get_scale_d()); AuxOutputScaler aux_scaler(output_op.get_scale_aux()); if (!output_op.is_source_needed()) { compute_source_not_needed_( output_op, broadcast_fragment, destination_iterator, accumulators, aux_iterator, output_scaler, aux_scaler); } else { compute_source_needed_( output_op, broadcast_fragment, destination_iterator, accumulators, source_iterator, aux_iterator, output_scaler, aux_scaler); } // Store the absolute maximum values of the output and auxiliar tensors, if needed. if (output_op.get_ptr_output_abs_max() != nullptr) { ElementAbsmax local_abs_max = NumericConverter<ElementAbsmax, ElementCompute, OutputOp::kRound>{}(output_scaler.get_abs_max()); atomic_maximum<ElementAbsmax>{}( output_op.get_ptr_output_abs_max(), local_abs_max); } if (output_op.get_ptr_aux_output_abs_max() != nullptr) { ElementAbsmax local_abs_max = NumericConverter<ElementAbsmax, ElementCompute, OutputOp::kRound>{}(aux_scaler.get_abs_max()); atomic_maximum<ElementAbsmax>{}( output_op.get_ptr_aux_output_abs_max(), local_abs_max); } } private: CUTLASS_DEVICE void load_broadcast_fragment_( BroadcastFragment & broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns ElementVector const * broadcast_ptr, ///< Broadcast vector MatrixCoord const &problem_size, ///< Problem size needed to guard against out-of-bounds accesses MatrixCoord const &threadblock_offset ///< Threadblock's initial offset within the problem size space ) { broadcast_fragment.clear(); // If no pointer is supplied, set with all zeros and avoid memory accesses if (!broadcast_ptr) { return; } int thread_initial_column = ThreadMap::initial_offset(thread_idx_).column(); int thread_column_idx = threadblock_offset.column() + thread_initial_column; broadcast_ptr += thread_initial_column; NumericArrayConverter<ElementCompute, ElementVector, BroadcastDetail::kElementsPerAccess> converter; using AccessType = AlignedArray<ElementVector, BroadcastDetail::kElementsPerAccess>; using ComputeFragmentType = Array<ElementCompute, BroadcastDetail::kElementsPerAccess>; ComputeFragmentType *frag_ptr = reinterpret_cast<ComputeFragmentType *>(&broadcast_fragment); CUTLASS_PRAGMA_UNROLL for (int j = 0; j < ThreadMap::Iterations::kColumn; ++j) { AccessType loaded; loaded.clear(); if (thread_column_idx < problem_size.column()) { loaded = *reinterpret_cast<AccessType const *>(broadcast_ptr); } ComputeFragmentType cvt = converter(loaded); frag_ptr[j] = cvt; thread_column_idx += ThreadMap::Delta::kColumn; broadcast_ptr += ThreadMap::Delta::kColumn; } } template <class Seq> struct acc2smem_source_not_needed; template <size_t... 
Seq> struct acc2smem_source_not_needed<cutlass::index_sequence<Seq...>> { template <int Advance> CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator, WarpTileIterator &warp_tile_iterator) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Advance; i++) { ++accum_fragment_iterator; } CUTLASS_PRAGMA_UNROLL for (int p = 0; p < Base::kFragmentsPerIteration; ++p) { typename AccumulatorFragmentIterator::Fragment accum_fragment; accum_fragment_iterator.load(accum_fragment); ++accum_fragment_iterator; warp_tile_iterator.store(accum_fragment); if (p < Base::kFragmentsPerIteration - 1) { warp_tile_iterator.add_pointer_offset(kSmemPointerOffset); } } if (Base::kFragmentsPerIteration > 1) { warp_tile_iterator.add_pointer_offset(kSmemPointerOffset * (1 - Base::kFragmentsPerIteration)); } } CUTLASS_DEVICE static void push(size_t pos, AccumulatorFragmentIterator const &iterator_begin, WarpTileIterator &warp_tile_iterator) { int dummy[] = { (pos == (Seq * Base::kFragmentsPerIteration)) && (helper<Seq * Base::kFragmentsPerIteration>(iterator_begin, warp_tile_iterator), 0)...}; CUTLASS_UNUSED(dummy[0]); } }; /// Streams the result to global memory CUTLASS_DEVICE void compute_source_not_needed_( OutputOp &output_op, ///< Output operator BroadcastFragment const &broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile AuxOutputTileIterator aux_iterator, ///< Tile iterator for destination auxiliary output OutputScaler& output_scaler, ///< Helper for (optionally) computing the absolute maximum and scaling output AuxOutputScaler& aux_scaler ///< Helper for (optionally) computing the absolute maximum and scaling the auxiliary output ) { // // Iterator over warp-level accumulator fragment // AccumulatorFragmentIterator accum_fragment_iterator(accumulators); // // Iterate over accumulator tile // // CUTLASS_PRAGMA_UNROLL #pragma unroll(IterationsUnroll ? 
OutputTileIterator::kIterations / Base::kFragmentsPerIteration : 1) for (int iter = 0; iter < OutputTileIterator::kIterations; iter += Base::kFragmentsPerIteration) { // // Convert and store fragment // __syncthreads(); acc2smem_source_not_needed< cutlass::make_index_sequence<OutputTileIterator::kIterations / Base::kFragmentsPerIteration>>::push(iter, accum_fragment_iterator, this->warp_tile_iterator_); __syncthreads(); // // Load fragments from shared memory // CUTLASS_PRAGMA_UNROLL for (int p = 0; p < Base::kFragmentsPerIteration; ++p) { typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK]; shared_load_iterator_.load(aligned_accum_fragment[0]); if (p < Base::kFragmentsPerIteration - 1) { shared_load_iterator_.add_pointer_offset(kSmemPointerOffset); } else if (kPartitionsK > 1) { plus <typename SharedLoadIterator::Fragment> add_fragments; CUTLASS_PRAGMA_UNROLL for ( int i = 1; i < kPartitionsK; ++i) { shared_load_iterator_.add_pointer_offset(kSmemPointerOffset); shared_load_iterator_.load(aligned_accum_fragment[i]); aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]); } shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset); } // // Apply output operation // FragmentCompute frag_Z_compute; FragmentCompute frag_Aux_compute; apply_output_operator_source_not_needed_( frag_Z_compute, frag_Aux_compute, output_op, aligned_accum_fragment[0], broadcast_fragment); // // Conditionally store fragments // // (Optionally) compute the absolute maximum of frag_Z and scale frag_Z frag_Z_compute = output_scaler(destination_iterator, frag_Z_compute); NumericArrayConverter<typename OutputTileIterator::Fragment::Element, ElementCompute, OutputTileIterator::Fragment::kElements> cvt_to_dst; typename OutputTileIterator::Fragment frag_Z = cvt_to_dst(frag_Z_compute); // Always store the output destination_iterator.store(frag_Z); ++destination_iterator; // Only store the auxiliary output if scaling and absolute-maximum calculation were needed if (OutputOp::kIsScalingAndAmaxAuxOutputNeeded) { frag_Aux_compute = aux_scaler(aux_iterator, frag_Aux_compute); NumericArrayConverter<typename AuxOutputTileIterator::Fragment::Element, ElementCompute, AuxOutputTileIterator::Fragment::kElements> cvt_to_aux; typename AuxOutputTileIterator::Fragment frag_Aux = cvt_to_aux(frag_Aux_compute); aux_iterator.store(frag_Aux); ++aux_iterator; } } if (Base::kFragmentsPerIteration > 1) { shared_load_iterator_.add_pointer_offset(kSmemPointerOffset * (1 - Base::kFragmentsPerIteration)); } } } template<class Seq> struct acc2smem_source_needed; template <size_t... 
Seq> struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> { template<int Advance> CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator, WarpTileIterator &warp_tile_iterator) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Advance; i++) { ++accum_fragment_iterator; } typename AccumulatorFragmentIterator::Fragment accum_fragment; accum_fragment_iterator.load(accum_fragment); warp_tile_iterator.store(accum_fragment); } CUTLASS_DEVICE static void push(size_t pos, AccumulatorFragmentIterator const &iterator_begin, WarpTileIterator &warp_tile_iterator) { int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...}; } }; /// Streams the result to global memory CUTLASS_DEVICE void compute_source_needed_( OutputOp &output_op, ///< Output operator BroadcastFragment const &broadcast_fragment, ///< Fragment containing the accumulated partial reduction over columns OutputTileIterator destination_iterator, ///< Tile iterator for destination AccumulatorTile const &accumulators, ///< Complete warp-level accumulator tile OutputTileIterator source_iterator, ///< Tile iterator for source accumulator matrix AuxOutputTileIterator aux_iterator, ///< Tile iterator for destination auxiliary output OutputScaler& output_scaler, ///< Helper for (optionally) computing the absolute maximum and scaling output AuxOutputScaler& aux_scaler ///< Helper for (optionally) computing the absolute maximum and scaling the auxiliary output ) { typename OutputTileIterator::Fragment source_fragment; source_fragment.clear(); // // Iterator over warp-level accumulator fragment // AccumulatorFragmentIterator accum_fragment_iterator(accumulators); // // Iterate over accumulator tile // #pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1) for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) { // // Load the source // source_iterator.load(source_fragment); ++source_iterator; // // Convert and store fragment // __syncthreads(); acc2smem_source_needed<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::push( iter, accum_fragment_iterator, this->warp_tile_iterator_); __syncthreads(); // // Load fragments from shared memory // typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK]; shared_load_iterator_.load(aligned_accum_fragment[0]); // If the number of k-slices is > 1 - perform a reduction amongst the k-slices if (kPartitionsK > 1) { plus <typename SharedLoadIterator::Fragment> add_fragments; const int tile_row_offset = Base::SharedStorage::StorageShape::kRow / PartitionsK; CUTLASS_PRAGMA_UNROLL for ( int i = 1; i < kPartitionsK; ++i) { shared_load_iterator_.add_tile_offset({tile_row_offset , 0}); shared_load_iterator_.load(aligned_accum_fragment[i]); aligned_accum_fragment[0] = add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]); } shared_load_iterator_.add_tile_offset({-1 * (kPartitionsK-1) * tile_row_offset, 0}); } // // Apply output operation // FragmentCompute frag_Z_compute; FragmentCompute frag_Aux_compute; apply_output_operator_( frag_Z_compute, frag_Aux_compute, output_op, aligned_accum_fragment[0], source_fragment, broadcast_fragment); // // Conditionally store fragments // // (Optionally) compute the absolute maximum of frag_Z and scale frag_Z frag_Z_compute = output_scaler(destination_iterator, frag_Z_compute); NumericArrayConverter<typename OutputTileIterator::Fragment::Element, ElementCompute, OutputTileIterator::Fragment::kElements> cvt_to_dst; typename OutputTileIterator::Fragment 
frag_Z = cvt_to_dst(frag_Z_compute); // Always store the output destination_iterator.store(frag_Z); ++destination_iterator; // Only store the auxiliary output if scaling and absolute-maximum calculation were needed if (OutputOp::kIsScalingAndAmaxAuxOutputNeeded) { frag_Aux_compute = aux_scaler(aux_iterator, frag_Aux_compute); NumericArrayConverter<typename AuxOutputTileIterator::Fragment::Element, ElementCompute, AuxOutputTileIterator::Fragment::kElements> cvt_to_aux; typename AuxOutputTileIterator::Fragment frag_Aux = cvt_to_aux(frag_Aux_compute); aux_iterator.store(frag_Aux); ++aux_iterator; } } } /// Helper to invoke the output functor over each vector of output CUTLASS_DEVICE void apply_output_operator_( FragmentCompute &frag_Z, FragmentCompute &frag_Aux, OutputOp &output_op, typename SharedLoadIterator::Fragment const &frag_AB, typename OutputTileIterator::Fragment const &frag_C, BroadcastFragment const &frag_Broadcast) { using AccessTypeZ = Array<ElementCompute, kElementsPerAccess>; using AccessTypeAux = Array<ElementCompute, kElementsPerAccess>; using AccessTypeBroadcast = Array<ElementCompute, kElementsPerAccess>; AccessTypeZ *frag_Z_ptr = reinterpret_cast<AccessTypeZ *>(&frag_Z); AccessTypeAux *frag_Aux_ptr = reinterpret_cast<AccessTypeAux *>(&frag_Aux); AccumulatorAccessType const *frag_AB_ptr = reinterpret_cast<AccumulatorAccessType const *>(&frag_AB); OutputAccessType const *frag_C_ptr = reinterpret_cast<OutputAccessType const *>(&frag_C); AccessTypeBroadcast const *frag_Broadcast_ptr = reinterpret_cast<AccessTypeBroadcast const *>(&frag_Broadcast); int const kOutputOpIterations = OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kOutputOpIterations; ++i) { output_op( frag_Z_ptr[i], frag_Aux_ptr[i], frag_AB_ptr[i], frag_Broadcast_ptr[i % ThreadMap::Iterations::kColumn], frag_C_ptr[i]); } } /// Helper to invoke the output functor over each vector of output CUTLASS_DEVICE void apply_output_operator_source_not_needed_( FragmentCompute &frag_Z, FragmentCompute &frag_Aux, OutputOp &output_op, typename SharedLoadIterator::Fragment const &frag_AB, BroadcastFragment const &frag_Broadcast) { using AccessTypeZ = Array<ElementCompute, kElementsPerAccess>; using AccessTypeAux = Array<ElementCompute, kElementsPerAccess>; using AccessTypeBroadcast = Array<ElementCompute, kElementsPerAccess>; AccessTypeZ *frag_Z_ptr = reinterpret_cast<AccessTypeZ *>(&frag_Z); AccessTypeAux *frag_Aux_ptr = reinterpret_cast<AccessTypeAux *>(&frag_Aux); AccumulatorAccessType const *frag_AB_ptr = reinterpret_cast<AccumulatorAccessType const *>(&frag_AB); AccessTypeBroadcast const *frag_Broadcast_ptr = reinterpret_cast<AccessTypeBroadcast const *>(&frag_Broadcast); int const kOutputOpIterations = OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kOutputOpIterations; ++i) { output_op( frag_Z_ptr[i], frag_Aux_ptr[i], frag_AB_ptr[i], frag_Broadcast_ptr[i % ThreadMap::Iterations::kColumn]); } } }; //////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
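The OutputScaler / AuxOutputScaler helpers invoked just before NumericArrayConverter above combine two jobs: tracking the absolute maximum of every value the epilogue produces and applying a scale factor before the fragment is narrowed to the output type. The following is a minimal host-side sketch of that idea only; it is not the CUTLASS OutputScaler interface, whose exact behavior (including whether amax is taken before or after scaling) is defined by the epilogue's output op.

// Hypothetical standalone sketch of an absmax-tracking scaler, not CUTLASS code.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

struct AbsMaxScaler {
  float abs_max = 0.f;   // reduced across all fragments the epilogue visits
  float scale   = 1.f;   // e.g. chosen so the scaled output fits a narrow (FP8-like) range

  void apply(std::vector<float>& frag) {
    for (float& x : frag) {
      abs_max = std::max(abs_max, std::fabs(x));  // record amax of the computed values
      x *= scale;                                 // scale before the type conversion step
    }
  }
};

int main() {
  AbsMaxScaler scaler;
  scaler.scale = 0.5f;
  std::vector<float> frag = {-3.f, 1.5f, 2.f};
  scaler.apply(frag);
  std::printf("abs_max=%.1f scaled[0]=%.2f\n", scaler.abs_max, frag[0]);  // 3.0, -1.50
}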
cutlass/include/cutlass/epilogue/threadblock/epilogue_with_absmax.h/0
{ "file_path": "cutlass/include/cutlass/epilogue/threadblock/epilogue_with_absmax.h", "repo_id": "cutlass", "token_count": 12927 }
25
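The acc2smem_source_needed / acc2smem_source_not_needed helpers in the file above use a pack-expansion trick to turn a runtime iteration index into a call to a helper instantiated with a matching compile-time constant. A standalone sketch of the same pattern (plain C++, illustrative names only) is below: each term of the array initializer short-circuits, so only the term whose Seq equals the runtime position actually invokes the helper.

// Hypothetical illustration of the index_sequence dispatch trick; not CUTLASS code.
#include <cstdio>
#include <utility>

template <std::size_t... Seq>
struct Dispatcher {
  template <int Advance>
  static void helper() {
    // In the epilogue this advances the accumulator fragment iterator 'Advance' times
    // and stores one fragment to shared memory; here we just print the constant.
    std::printf("helper<%d> selected\n", Advance);
  }

  static void push(std::size_t pos) {
    // The array exists only to expand the parameter pack; '&&' guarantees that
    // helper<Seq>() runs only for the term where pos == Seq.
    int dummy[] = {(pos == Seq && (helper<static_cast<int>(Seq)>(), false))...};
    (void)dummy;
  }
};

template <std::size_t... Seq>
Dispatcher<Seq...> make_dispatcher(std::index_sequence<Seq...>) { return {}; }

int main() {
  auto d = make_dispatcher(std::make_index_sequence<4>{});
  d.push(2);  // prints "helper<2> selected"
}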
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/layout/matrix.h" #include "cutlass/fast_math.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// template < int Rank > struct PredicatedTileIteratorAffineLayoutRankNParams { using Layout = layout::AffineRankN<Rank>; using TensorCoord = typename Layout::TensorCoord; static bool const kBigEndian = false; // // Data members // Layout layout; /// Stride in units of bytes along M modes Coord<Layout::kRank/2, typename Layout::LongIndex> stride_m; /// Stride in units of bytes along N modes Coord<Layout::kRank/2, typename Layout::LongIndex> stride_n; /// Fast divmod objects divided by tensor extents FastDivmod divmod_m[(Layout::kRank == 2) ? 1 : (Layout::kRank/2 - 1)]; /// Fast divmod objects divided by tensor extents FastDivmod divmod_n[(Layout::kRank == 2) ? 
1 : (Layout::kRank/2 - 1)]; int64_t rank2_inc_col; int64_t rank2_inc_row; // // Methods // CUTLASS_HOST_DEVICE PredicatedTileIteratorAffineLayoutRankNParams() { } CUTLASS_HOST_DEVICE PredicatedTileIteratorAffineLayoutRankNParams(TensorCoord const &extent, Layout const &layout_, int64_t element_sizeof_bits) : layout(layout_) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Layout::kRank / 2; ++i) { stride_m[i] = OffsetBytes(layout_.stride()[i], element_sizeof_bits); stride_n[i] = OffsetBytes(layout_.stride()[i + Layout::kRank / 2], element_sizeof_bits); } if (kBigEndian) { // "Big Endian" scheme CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Layout::kRank / 2 - 1; ++i) { divmod_m[i] = FastDivmod(extent[i + 1]); divmod_n[i] = FastDivmod(extent[i + Layout::kRank / 2 + 1]); } } else { // "Little Endian" scheme CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Layout::kRank / 2 - 1; ++i) { divmod_m[i] = FastDivmod(extent[i]); divmod_n[i] = FastDivmod(extent[i + Layout::kRank / 2]); } } #if 0 // // Debug print statements to verify extents and strides are passed correctly. // printf("PredicatedTileIteratorAffine::Params() entered\n"); CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Layout::kRank; ++i) { printf(" extent[%d]: %d\n", i, extent[i]); } for (int i = 0; i < Layout::kRank; ++i) { printf(" stride[%d]: %ld\n", i, layout_.stride()[i]); } printf("PredicatedTileIteratorAffine::Params() returning\n"); #endif } CUTLASS_HOST_DEVICE PredicatedTileIteratorAffineLayoutRankNParams(Layout const &layout_, int32_t threadmap_delta_kColumn, int32_t threadmap_delta_kRow, int64_t element_sizeof_bits) : layout(layout_) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < Layout::kRank / 2; ++i) { stride_m[i] = OffsetBytes(layout_.stride()[i], element_sizeof_bits); stride_n[i] = OffsetBytes(layout_.stride()[i + Layout::kRank / 2], element_sizeof_bits); } rank2_inc_col = threadmap_delta_kColumn * stride_n[0]; rank2_inc_row = threadmap_delta_kRow * stride_m[0]; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_affine_layout_params.h/0
{ "file_path": "cutlass/include/cutlass/epilogue/threadblock/predicated_tile_iterator_affine_layout_params.h", "repo_id": "cutlass", "token_count": 2047 }
26
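The params struct above precomputes byte strides per mode and FastDivmod objects so the iterator can decompose a linear index into per-mode coordinates without integer division on the device. The sketch below shows only the arithmetic being replaced, using plain div/mod in the "little endian" mode order; the extents and strides are made-up values, and cutlass::FastDivmod itself implements the same pair of operations with a multiply-and-shift.

// Hypothetical host-side sketch of affine rank-N offset computation; not CUTLASS code.
#include <cstdint>
#include <cstdio>
#include <vector>

int64_t affine_offset_bytes(int64_t linear_idx,
                            const std::vector<int>& extents,         // extents of the modes
                            const std::vector<int64_t>& stride_bytes) {
  int64_t offset = 0;
  // Little-endian: mode 0 varies fastest, so peel extents from the front.
  for (std::size_t i = 0; i + 1 < extents.size(); ++i) {
    int64_t coord = linear_idx % extents[i];   // FastDivmod replaces this div/mod pair
    linear_idx    = linear_idx / extents[i];
    offset += coord * stride_bytes[i];
  }
  offset += linear_idx * stride_bytes.back();  // the last mode takes whatever remains
  return offset;
}

int main() {
  // A rank-4 tensor split into M modes and N modes would run this separately per group.
  std::printf("%lld\n", (long long)affine_offset_bytes(7, {4, 3}, {2, 8}));  // prints 14
}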
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines basic structures needed for implementing the warp-scoped phase of the epilogue. These quantities assume a 'column-major' arrangement of SimtOp instructions, of which a row-oriented slice is visible per iteration. 
*/ #pragma once #include "cutlass/matrix_shape.h" #include "cutlass/layout/matrix.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename WarpShape, ///< shape of warp-level GEMM (concept: GemmShape) typename Operator, ///< matrix multiply operation (concept: arch::Mma) typename Layout, ///< destination layout in shared memory typename MmaSimtPolicy ///< policy defining lane arrangement (concept: MmaSimtPolicy) > struct SimtPolicy; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial specialization for row-major template < typename WarpShape_, ///< shape of warp-level GEMM (concept: MatrixShape) typename Operator_, ///< matrix multiply operation (concept: arch::Mma) typename MmaSimtPolicy_ ///< policy defining lane arrangement (concept: MmaSimtPolicy) > struct SimtPolicy<WarpShape_, Operator_, layout::RowMajor, MmaSimtPolicy_> { using WarpShape = WarpShape_; using Operator = Operator_; using MmaSimtPolicy = MmaSimtPolicy_; static_assert(!(WarpShape::kM % MmaSimtPolicy::WarpShape::kRow), "Divisibility"); static_assert(!(WarpShape::kN % MmaSimtPolicy::WarpShape::kColumn), "Divisibility"); /// Number of iterations static int const kIterations = WarpShape::kM / MmaSimtPolicy::WarpShape::kRow; /// Number of accumulators written per iteration static int const kElementsPerIteration = (WarpShape::kN / MmaSimtPolicy::WarpShape::kColumn); /// Total number of accumulators static int const kAccumulatorElementCount = kElementsPerIteration * kIterations; /// Number of consecutive elements static int const kElementsPerAccess = MmaSimtPolicy::LaneMmaShape::kN; /// Number of rows per epilogue iteration static int const kRowsPerIteration = MmaSimtPolicy::WarpShape::kRow; /// Number of accesses made in one iteration static int const kAccessesPerIteration = kElementsPerIteration / kElementsPerAccess; /// Number of elements in between accumulator chunks of (LaneMmaShape::kM x LaneMmaShape::kN) using Delta = MatrixShape< MmaSimtPolicy::WarpShape::kRow * MmaSimtPolicy::LaneMmaShape::kM, MmaSimtPolicy::WarpShape::kColumn * MmaSimtPolicy::LaneMmaShape::kN >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/warp/simt_policy.h/0
{ "file_path": "cutlass/include/cutlass/epilogue/warp/simt_policy.h", "repo_id": "cutlass", "token_count": 1339 }
27
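All of the SimtPolicy constants above are simple ratios of the warp tile shape and the lane arrangement. The snippet below re-derives them numerically for one hypothetical configuration (the tile sizes are illustrative, not taken from a specific kernel) so the relationships are easy to check.

// Hypothetical numeric check of the SimtPolicy arithmetic; not CUTLASS code.
#include <cstdio>

int main() {
  constexpr int kWarpM = 64, kWarpN = 64;       // WarpShape
  constexpr int kLaneRows = 4, kLaneCols = 8;   // MmaSimtPolicy::WarpShape (4x8 lanes = 32 threads)
  constexpr int kLaneMmaN = 4;                  // MmaSimtPolicy::LaneMmaShape::kN

  static_assert(kWarpM % kLaneRows == 0 && kWarpN % kLaneCols == 0, "Divisibility");

  constexpr int kIterations           = kWarpM / kLaneRows;             // rows per lane
  constexpr int kElementsPerIteration = kWarpN / kLaneCols;             // accumulators per iteration
  constexpr int kAccumulatorElements  = kIterations * kElementsPerIteration;
  constexpr int kAccessesPerIteration = kElementsPerIteration / kLaneMmaN;

  std::printf("iterations=%d elems/iter=%d total=%d accesses/iter=%d\n",
              kIterations, kElementsPerIteration, kAccumulatorElements, kAccessesPerIteration);
}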
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level Rank 2k definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. Note, CUTLASS epilogues universally target row-major outputs. Column-major outputs are accommodated by exchanging A and B operands and assuming transposed layouts. 
*/ #pragma once #include "cutlass/blas3.h" #include "cutlass/complex.h" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/kernel/rank_2k_universal.h" #include "cutlass/gemm/kernel/default_rank_2k.h" #include "cutlass/gemm/kernel/default_rank_2k_complex.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Element type for A matrix operand typename ElementA_, /// Layout type for A matrix operand typename LayoutA_, /// Complex elementwise transformation on A operand ComplexTransform TransformA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB_, /// Layout type for B matrix operand typename LayoutB_, /// Complex elementwise transformation on B operand ComplexTransform TransformB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC_, /// Layout type for C and D matrix operands typename LayoutC_, /// Fill Mode for C (kLower or kUpper) FillMode FillModeC_, /// Element type for internal accumulation typename ElementAccumulator, /// Operator class tag typename OperatorClass, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by SYRK typename Operator, /// Blas3 computation mode (symmetric/hermitian) BlasMode BlasMode_ = BlasMode::kSymmetric, /// typename Enable = void > struct DefaultRank2KUniversal; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Real-valued Rank 2k update kernels // template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Layout type for C and D matrix operands typename LayoutC, /// Fill Mode for C (kLower or kUpper) FillMode FillModeC, /// Element type for internal accumulation typename ElementAccumulator, /// Operator class tag typename OperatorClass, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// 
Operation performed by Rank2k typename Operator> struct DefaultRank2KUniversal< ElementA, LayoutA, ComplexTransform::kNone, // transform A kAlignmentA, ElementB, LayoutB, ComplexTransform::kNone, // transform B kAlignmentB, ElementC, LayoutC, FillModeC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, Operator, BlasMode::kSymmetric, typename platform::enable_if< ! cutlass::is_complex<ElementAccumulator>::value>::type > { using DefaultRank2Kkernel = typename kernel::DefaultRank2K< ElementA, LayoutA, kAlignmentA, ElementB, LayoutB, kAlignmentB, ElementC, LayoutC, FillModeC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, Operator, BlasMode::kSymmetric >::Rank2Kkernel; /// Define the kernel in terms of the default kernel using Rank2Kkernel = kernel::Rank2KUniversal< typename DefaultRank2Kkernel::Mma1, typename DefaultRank2Kkernel::Mma2, typename DefaultRank2Kkernel::Epilogue, ThreadblockSwizzle, FillModeC, BlasMode::kSymmetric >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // // Complex-valued Rank 2K update kernels // template < /// Element type for A matrix operand typename ElementA, /// Layout type for A matrix operand typename LayoutA, /// Complex elementwise transformation on A operand ComplexTransform TransformA, /// Access granularity of A matrix in units of elements int kAlignmentA, /// Element type for B matrix operand typename ElementB, /// Layout type for B matrix operand typename LayoutB, /// Complex elementwise transformation on B operand ComplexTransform TransformB, /// Access granularity of B matrix in units of elements int kAlignmentB, /// Element type for C and D matrix operands typename ElementC, /// Layout type for C and D matrix operands typename LayoutC, /// Fill Mode for C (kLower or kUpper) FillMode FillModeC, /// Element type for internal accumulation typename ElementAccumulator, /// Operator class tag typename OperatorClass, /// Tag indicating architecture to tune for typename ArchTag, /// Threadblock-level tile size (concept: GemmShape) typename ThreadblockShape, /// Warp-level tile size (concept: GemmShape) typename WarpShape, /// Warp-level tile size (concept: GemmShape) typename InstructionShape, /// Epilogue output operator typename EpilogueOutputOp, /// Threadblock-level swizzling operator typename ThreadblockSwizzle, /// Number of stages used in the pipelined mainloop int Stages, /// If true, kernel is configured to support serial reduction in the /// epilogue bool SplitKSerial, /// Operation performed by SYRK typename Operator, // BlasMode BlasMode kBlasMode > struct DefaultRank2KUniversal< ElementA, LayoutA, TransformA, kAlignmentA, ElementB, LayoutB, TransformB, kAlignmentB, ElementC, LayoutC, FillModeC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, SplitKSerial, Operator, kBlasMode, typename platform::enable_if<cutlass::is_complex<ElementAccumulator>::value>::type > { using DefaultRank2Kkernel = typename kernel::DefaultRank2KComplex< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, FillModeC, ElementAccumulator, OperatorClass, ArchTag, ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, ThreadblockSwizzle, Stages, TransformA, TransformB, Operator, SplitKSerial, kBlasMode >::Rank2Kkernel; 
/// Define the kernel in terms of the default kernel using Rank2Kkernel = kernel::Rank2KUniversal< typename DefaultRank2Kkernel::Mma1, typename DefaultRank2Kkernel::Mma2, typename DefaultRank2Kkernel::Epilogue, ThreadblockSwizzle, FillModeC, kBlasMode >; }; } // namespace kernel } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/kernel/default_rank_2k_universal.h/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/default_rank_2k_universal.h", "repo_id": "cutlass", "token_count": 3345 }
28
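DefaultRank2KUniversal above selects between its real-valued and complex-valued partial specializations with enable_if on whether the accumulator type is complex. The standalone sketch below reproduces just that SFINAE dispatch shape with illustrative type names; the real trait is cutlass::is_complex and the selected type is the Rank2Kkernel, neither of which is shown here.

// Hypothetical sketch of the enable_if-based specialization dispatch; not CUTLASS code.
#include <complex>
#include <cstdio>
#include <type_traits>

template <typename T> struct is_complex : std::false_type {};
template <typename T> struct is_complex<std::complex<T>> : std::true_type {};

template <typename ElementAccumulator, typename Enable = void>
struct DefaultKernel;  // primary template: declared only, never defined

template <typename ElementAccumulator>
struct DefaultKernel<ElementAccumulator,
                     typename std::enable_if<!is_complex<ElementAccumulator>::value>::type> {
  static const char* name() { return "real-valued path"; }
};

template <typename ElementAccumulator>
struct DefaultKernel<ElementAccumulator,
                     typename std::enable_if<is_complex<ElementAccumulator>::value>::type> {
  static const char* name() { return "complex-valued path"; }
};

int main() {
  std::printf("%s\n", DefaultKernel<float>::name());                // real-valued path
  std::printf("%s\n", DefaultKernel<std::complex<float>>::name());  // complex-valued path
}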
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/fast_math.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_coord.h" #include "cutlass/complex.h" #include "cutlass/semaphore.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/gemm/kernel/params_universal_base.h" #include "cutlass/trace.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Mma_, ///! Threadblock-scoped matrix multiply-accumulate typename Epilogue_, ///! Epilogue typename EpilogueGemmKReduction_, ///! Epilogue typename ThreadblockSwizzle_ ///! 
Threadblock swizzling function > struct GemmWithKReduction { public: using Mma = Mma_; using Epilogue = Epilogue_; using EpilogueOutputOp = typename Epilogue::OutputOp; using EpilogueGemmKReduction = EpilogueGemmKReduction_; using ThreadblockSwizzle = ThreadblockSwizzle_; using ElementA = typename Mma::IteratorA::Element; using LayoutA = typename Mma::IteratorA::Layout; using ElementB = typename Mma::IteratorB::Element; using LayoutB = typename Mma::IteratorB::Layout; using ElementC = typename Epilogue::OutputTileIterator::Element; using LayoutC = typename Epilogue::OutputTileIterator::Layout; using LayoutGemmKReduction = cutlass::layout::PitchLinear; static ComplexTransform const kTransformA = Mma::kTransformA; static ComplexTransform const kTransformB = Mma::kTransformB; using Operator = typename Mma::Operator; using OperatorClass = typename Mma::Operator::OperatorClass; using ThreadblockShape = typename Mma::Shape; using WarpShape = typename Mma::Operator::Shape; using InstructionShape = typename Mma::Policy::Operator::InstructionShape; using ArchTag = typename Mma::ArchTag; static int const kStages = Mma::kStages; static int const kAlignmentA = Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = Epilogue::OutputTileIterator::kElementsPerAccess; /// Warp count (concept: GemmShape) using WarpCount = typename Mma::WarpCount; static int const kThreadCount = 32 * WarpCount::kCount; /// Split-K preserves splits that are 128b aligned static int const kSplitKAlignment = const_max(128 / sizeof_bits<ElementA>::value, 128 / sizeof_bits<ElementB>::value); static int const kReduceKForA = Mma::kReduceKForA; // // Structures // /// Argument structure struct Arguments : UniversalArgumentsBase { // // Data members // typename EpilogueOutputOp::Params epilogue; void const * ptr_A; void const * ptr_B; void const * ptr_C; void * ptr_D; void * ptr_gemm_k_reduction; int64_t batch_stride_A; int64_t batch_stride_B; int64_t batch_stride_C; int64_t batch_stride_gemm_k_reduction; typename LayoutA::Stride::Index lda; typename LayoutB::Stride::Index ldb; typename LayoutC::Stride::Index ldc; typename LayoutC::Stride::Index ldd; typename LayoutGemmKReduction::Stride::Index ld_gemm_k_reduction; // // Methods // Arguments() : ptr_A(nullptr), ptr_B(nullptr), ptr_C(nullptr), ptr_D(nullptr), ptr_gemm_k_reduction(nullptr) {} /// constructs an arguments structure Arguments( GemmUniversalMode mode, GemmCoord problem_size, int batch_count, typename EpilogueOutputOp::Params epilogue, void const * ptr_A, void const * ptr_B, void const * ptr_C, void * ptr_D, void * ptr_gemm_k_reduction, int64_t batch_stride_A, int64_t batch_stride_B, int64_t batch_stride_C, int64_t batch_stride_D, int64_t batch_stride_gemm_k_reduction, typename LayoutA::Stride::Index lda, typename LayoutB::Stride::Index ldb, typename LayoutC::Stride::Index ldc, typename LayoutC::Stride::Index ldd, typename LayoutGemmKReduction::Stride::Index ld_gemm_k_reduction) : UniversalArgumentsBase(mode, problem_size, batch_count, batch_stride_D), epilogue(epilogue), ptr_A(ptr_A), ptr_B(ptr_B), ptr_C(ptr_C), ptr_D(ptr_D), ptr_gemm_k_reduction(ptr_gemm_k_reduction), batch_stride_A(batch_stride_A), batch_stride_B(batch_stride_B), batch_stride_C(batch_stride_C), batch_stride_gemm_k_reduction(batch_stride_gemm_k_reduction), lda(lda), ldb(ldb), ldc(ldc), ldd(ldd), ld_gemm_k_reduction(ld_gemm_k_reduction) { CUTLASS_TRACE_HOST("GemmUniversal::Arguments::Arguments() - problem_size: " << 
problem_size); } /// Returns arguments for the transposed problem Arguments transposed_problem() const { Arguments args(*this); std::swap(args.problem_size.m(), args.problem_size.n()); std::swap(args.ptr_A, args.ptr_B); std::swap(args.lda, args.ldb); std::swap(args.batch_stride_A, args.batch_stride_B); return args; } }; // // Structure for precomputing values in host memory and passing to kernels // /// Parameters structure struct Params : UniversalParamsBase< ThreadblockSwizzle, ThreadblockShape, ElementA, ElementB, ElementC, LayoutA, LayoutB> { using ParamsBase = UniversalParamsBase< ThreadblockSwizzle, ThreadblockShape, ElementA, ElementB, ElementC, LayoutA, LayoutB>; // // Data members // typename Mma::IteratorA::Params params_A; typename Mma::IteratorB::Params params_B; typename Epilogue::OutputTileIterator::Params params_C; typename Epilogue::OutputTileIterator::Params params_D; typename EpilogueOutputOp::Params output_op; void * ptr_A; void * ptr_B; void * ptr_C; void * ptr_D; void * ptr_gemm_k_reduction; int64_t batch_stride_A; int64_t batch_stride_B; int64_t batch_stride_C; int64_t batch_stride_gemm_k_reduction; // // Host dispatch API // /// Default constructor Params() = default; /// Constructor Params( Arguments const &args, /// GEMM application arguments int device_sms, /// Number of SMs on the device int sm_occupancy) /// Kernel SM occupancy (in thread blocks) : ParamsBase(args, device_sms, sm_occupancy), params_A(args.lda), params_B(args.ldb), params_C(args.ldc), params_D(args.ldd), output_op(args.epilogue), ptr_A(const_cast<void *>(args.ptr_A)), ptr_B(const_cast<void *>(args.ptr_B)), ptr_C(const_cast<void *>(args.ptr_C)), batch_stride_A(args.batch_stride_A), batch_stride_B(args.batch_stride_B), batch_stride_C(args.batch_stride_C), batch_stride_gemm_k_reduction(args.batch_stride_gemm_k_reduction), ptr_D(args.ptr_D), ptr_gemm_k_reduction(args.ptr_gemm_k_reduction) {} /// Assign and initialize the specified workspace buffer. Assumes /// the memory allocated to workspace is at least as large as get_workspace_size(). Status init_workspace( void *workspace, cudaStream_t stream = nullptr) { CUTLASS_TRACE_HOST("GemmUniversal::Params::Params() - problem_size: " << this->problem_size); if (this->mode == GemmUniversalMode::kGemmSplitKParallel) { ptr_D = workspace; ptr_gemm_k_reduction = static_cast<uint8_t *>(workspace) + sizeof(ElementC) * size_t(this->batch_stride_D) * size_t(this->grid_tiled_shape.k()); return Status::kSuccess; } return ParamsBase::init_workspace(workspace, stream); } /// Returns the workspace size (in bytes) needed for this problem geometry size_t get_workspace_size() const { size_t workspace_bytes = ParamsBase::get_workspace_size(); if (this->mode == GemmUniversalMode::kGemmSplitKParallel) { // Split-K parallel always requires a temporary workspace workspace_bytes += sizeof(ElementC) * size_t(batch_stride_gemm_k_reduction) * size_t(this->grid_tiled_shape.k()); } return workspace_bytes; } /// Lightweight update given a subset of arguments. 
void update(Arguments const &args) { ptr_A = const_cast<void *>(args.ptr_A); ptr_B = const_cast<void *>(args.ptr_B); ptr_C = const_cast<void *>(args.ptr_C); ptr_D = args.ptr_D; ptr_gemm_k_reduction = args.ptr_gemm_k_reduction; batch_stride_A = args.batch_stride_A; batch_stride_B = args.batch_stride_B; batch_stride_C = args.batch_stride_C; batch_stride_gemm_k_reduction = args.batch_stride_gemm_k_reduction; this->batch_stride_D = args.batch_stride_D; output_op = args.epilogue; CUTLASS_TRACE_HOST("GemmUniversal::Params::update()"); } }; /// Shared memory storage structure union SharedStorage { typename Mma::SharedStorage main_loop; typename Epilogue::SharedStorage epilogue; }; public: // // Host dispatch API // /// Determines whether kernel satisfies alignment static Status can_implement( cutlass::gemm::GemmCoord const & problem_size) { CUTLASS_TRACE_HOST("GemmUniversal::can_implement()"); static int const kAlignmentA = (platform::is_same<typename Mma::IteratorA::Layout, layout::ColumnMajorInterleaved<32>>::value) ? 32 : (platform::is_same<typename Mma::IteratorA::Layout, layout::ColumnMajorInterleaved<64>>::value) ? 64 : Mma::IteratorA::AccessType::kElements; static int const kAlignmentB = (platform::is_same<typename Mma::IteratorB::Layout, layout::RowMajorInterleaved<32>>::value) ? 32 : (platform::is_same<typename Mma::IteratorB::Layout, layout::RowMajorInterleaved<64>>::value) ? 64 : Mma::IteratorB::AccessType::kElements; static int const kAlignmentC = (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value) ? 32 : (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) ? 64 : Epilogue::OutputTileIterator::kElementsPerAccess; bool isAMisaligned = false; bool isBMisaligned = false; bool isCMisaligned = false; if (platform::is_same<LayoutA, layout::RowMajor>::value) { isAMisaligned = problem_size.k() % kAlignmentA; } else if (platform::is_same<LayoutA, layout::ColumnMajor>::value) { isAMisaligned = problem_size.m() % kAlignmentA; } else if (platform::is_same<LayoutA, layout::ColumnMajorInterleaved<32>>::value || platform::is_same<LayoutA, layout::ColumnMajorInterleaved<64>>::value) { isAMisaligned = problem_size.k() % kAlignmentA; } if (platform::is_same<LayoutB, layout::RowMajor>::value) { isBMisaligned = problem_size.n() % kAlignmentB; } else if (platform::is_same<LayoutB, layout::ColumnMajor>::value) { isBMisaligned = problem_size.k() % kAlignmentB; } else if (platform::is_same<LayoutB, layout::RowMajorInterleaved<32>>::value || platform::is_same<LayoutB, layout::RowMajorInterleaved<64>>::value) { isBMisaligned = problem_size.k() % kAlignmentB; } if (platform::is_same<LayoutC, layout::RowMajor>::value) { isCMisaligned = problem_size.n() % kAlignmentC; } else if (platform::is_same<LayoutC, layout::ColumnMajor>::value) { isCMisaligned = problem_size.m() % kAlignmentC; } else if (platform::is_same<LayoutC, layout::ColumnMajorInterleaved<32>>::value || platform::is_same<LayoutC, layout::ColumnMajorInterleaved<64>>::value) { isCMisaligned = problem_size.n() % kAlignmentC; } if (isAMisaligned) { CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for operand A"); return Status::kErrorMisalignedOperand; } if (isBMisaligned) { CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for operand B"); return Status::kErrorMisalignedOperand; } if (isCMisaligned) { CUTLASS_TRACE_HOST(" returning kErrorMisalignedOperand for operand C"); return Status::kErrorMisalignedOperand; } CUTLASS_TRACE_HOST(" returning kSuccess"); return Status::kSuccess; } static Status 
can_implement(Arguments const &args) { return can_implement(args.problem_size); } public: // // Device-only API // // Factory invocation CUTLASS_DEVICE static void invoke( Params const &params, SharedStorage &shared_storage) { GemmWithKReduction op; op(params, shared_storage); } /// Executes one GEMM CUTLASS_DEVICE void operator()(Params const &params, SharedStorage &shared_storage) { // Compute threadblock location ThreadblockSwizzle threadblock_swizzle; cutlass::gemm::GemmCoord threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); // Early exit if CTA is out of range if (params.grid_tiled_shape.m() <= threadblock_tile_offset.m() || params.grid_tiled_shape.n() <= threadblock_tile_offset.n()) { return; } int offset_k = 0; int problem_size_k = params.problem_size.k(); ElementA *ptr_A = static_cast<ElementA *>(params.ptr_A); ElementB *ptr_B = static_cast<ElementB *>(params.ptr_B); // // Fetch pointers based on mode. // if (params.mode == GemmUniversalMode::kGemm || params.mode == GemmUniversalMode::kGemmSplitKParallel) { if (threadblock_tile_offset.k() + 1 < params.grid_tiled_shape.k()) { problem_size_k = (threadblock_tile_offset.k() + 1) * params.gemm_k_size; } offset_k = threadblock_tile_offset.k() * params.gemm_k_size; } else if (params.mode == GemmUniversalMode::kBatched) { ptr_A += threadblock_tile_offset.k() * params.batch_stride_A; ptr_B += threadblock_tile_offset.k() * params.batch_stride_B; } else if (params.mode == GemmUniversalMode::kArray) { ptr_A = static_cast<ElementA * const *>(params.ptr_A)[threadblock_tile_offset.k()]; ptr_B = static_cast<ElementB * const *>(params.ptr_B)[threadblock_tile_offset.k()]; } __syncthreads(); // Compute initial location in logical coordinates cutlass::MatrixCoord tb_offset_A{ threadblock_tile_offset.m() * Mma::Shape::kM, offset_k, }; cutlass::MatrixCoord tb_offset_B{ offset_k, threadblock_tile_offset.n() * Mma::Shape::kN }; // Compute position within threadblock int thread_idx = threadIdx.x; // Construct iterators to A and B operands typename Mma::IteratorA iterator_A( params.params_A, ptr_A, {params.problem_size.m(), problem_size_k}, thread_idx, tb_offset_A); typename Mma::IteratorB iterator_B( params.params_B, ptr_B, {problem_size_k, params.problem_size.n()}, thread_idx, tb_offset_B); // Broadcast the warp_id computed by lane 0 to ensure dependent code // is compiled as warp-uniform. 
int warp_idx = canonical_warp_idx_sync(); int lane_idx = threadIdx.x % 32; // // Main loop // // Construct thread-scoped matrix multiply Mma mma(shared_storage.main_loop, thread_idx, warp_idx, lane_idx); typename Mma::FragmentC accumulators; accumulators.clear(); typename Mma::FragmentReduction gemm_k_accumulators; gemm_k_accumulators.clear(); // Compute threadblock-scoped matrix multiply-add int gemm_k_iterations = (problem_size_k - offset_k + Mma::Shape::kK - 1) / Mma::Shape::kK; // Compute threadblock-scoped matrix multiply-add mma( gemm_k_iterations, accumulators, iterator_A, iterator_B, accumulators, gemm_k_accumulators); // // Epilogue // EpilogueOutputOp output_op(params.output_op); // // Masked tile iterators constructed from members // threadblock_tile_offset = threadblock_swizzle.get_tile_offset(params.swizzle_log_tile); //assume identity swizzle MatrixCoord threadblock_offset( threadblock_tile_offset.m() * Mma::Shape::kM, threadblock_tile_offset.n() * Mma::Shape::kN ); int block_idx = threadblock_tile_offset.m() + threadblock_tile_offset.n() * params.grid_tiled_shape.m(); ElementC *ptr_C = static_cast<ElementC *>(params.ptr_C); ElementC *ptr_D = static_cast<ElementC *>(params.ptr_D); ElementC *ptr_gemm_k_reduction = static_cast<ElementC *>(params.ptr_gemm_k_reduction); // // Fetch pointers based on mode. // // Construct the semaphore. Semaphore semaphore(params.semaphore + block_idx, thread_idx); if (params.mode == GemmUniversalMode::kGemm) { // If performing a reduction via split-K, fetch the initial synchronization if (params.grid_tiled_shape.k() > 1) { // Fetch the synchronization lock initially but do not block. semaphore.fetch(); // Indicate which position in a serial reduction the output operator is currently updating output_op.set_k_partition(threadblock_tile_offset.k(), params.grid_tiled_shape.k()); } } else if (params.mode == GemmUniversalMode::kGemmSplitKParallel) { ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; ptr_gemm_k_reduction += threadblock_tile_offset.k() * params.batch_stride_gemm_k_reduction; } else if (params.mode == GemmUniversalMode::kBatched) { ptr_C += threadblock_tile_offset.k() * params.batch_stride_C; ptr_D += threadblock_tile_offset.k() * params.batch_stride_D; } else if (params.mode == GemmUniversalMode::kArray) { ptr_C = static_cast<ElementC * const *>(params.ptr_C)[threadblock_tile_offset.k()]; ptr_D = static_cast<ElementC * const *>(params.ptr_D)[threadblock_tile_offset.k()]; } // Tile iterator loading from source tensor. typename Epilogue::OutputTileIterator iterator_C( params.params_C, ptr_C, params.problem_size.mn(), thread_idx, threadblock_offset ); // Tile iterator writing to destination tensor. typename Epilogue::OutputTileIterator iterator_D( params.params_D, ptr_D, params.problem_size.mn(), thread_idx, threadblock_offset ); Epilogue epilogue( shared_storage.epilogue, thread_idx, warp_idx, lane_idx); // Wait on the semaphore - this latency may have been covered by iterator construction if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { // For subsequent threadblocks, the source matrix is held in the 'D' tensor. if (threadblock_tile_offset.k()) { iterator_C = iterator_D; } semaphore.wait(threadblock_tile_offset.k()); } // Execute the epilogue operator to update the destination tensor. 
epilogue( output_op, iterator_D, accumulators, iterator_C); if ((kReduceKForA && threadblock_tile_offset.n() == 0) || (!kReduceKForA && threadblock_tile_offset.m() == 0)) { int warp_idx_mn = warp_idx % (Mma::Base::WarpCount::kM * Mma::Base::WarpCount::kN); int warp_idx_m = warp_idx_mn % Mma::Base::WarpCount::kM; int warp_idx_n = warp_idx_mn / Mma::Base::WarpCount::kM; if ((kReduceKForA && warp_idx_n == 0) || (!kReduceKForA && warp_idx_m == 0)) { int reduction_warp_idx = kReduceKForA ? warp_idx_m : warp_idx_n; int reduction_threadblock_offset = kReduceKForA ? threadblock_tile_offset.m() : threadblock_tile_offset.n(); int reduction_vector_size = kReduceKForA ? params.problem_size.m() : params.problem_size.n(); EpilogueGemmKReduction epilogue_gemm_k_reduction(thread_idx, reduction_warp_idx, lane_idx, reduction_threadblock_offset, ptr_gemm_k_reduction); epilogue_gemm_k_reduction( reduction_vector_size, gemm_k_accumulators, params.mode == GemmUniversalMode::kGemm && (params.grid_tiled_shape.k() > 1) && (threadblock_tile_offset.k() > 0)); } } // // Release the semaphore // if (params.mode == GemmUniversalMode::kGemm && params.grid_tiled_shape.k() > 1) { int lock = 0; if (params.grid_tiled_shape.k() == threadblock_tile_offset.k() + 1) { // The final threadblock resets the semaphore for subsequent grids. lock = 0; } else { // Otherwise, the semaphore is incremented lock = threadblock_tile_offset.k() + 1; } semaphore.release(lock); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
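Besides D = A * B + C, the kernel above produces a "gemm_k_reduction" vector: the K-dimension sum of operand A (when kReduceKForA) or of operand B, accumulated fragment by fragment in the mainloop and written out by EpilogueGemmKReduction. The reference sketch below computes the same quantity with plain loops so the meaning of the extra output is unambiguous; sizes are arbitrary.

// Hypothetical reference for the K-reduction side output; not CUTLASS code.
#include <cstdio>
#include <vector>

int main() {
  const int M = 2, N = 3, K = 4;
  std::vector<float> A(M * K, 1.0f), B(K * N, 2.0f);
  std::vector<float> D(M * N, 0.0f), k_reduction(M, 0.0f);

  for (int m = 0; m < M; ++m) {
    for (int k = 0; k < K; ++k) {
      k_reduction[m] += A[m * K + k];            // reduction over K of operand A
      for (int n = 0; n < N; ++n) {
        D[m * N + n] += A[m * K + k] * B[k * N + n];
      }
    }
  }
  std::printf("D[0]=%.0f k_reduction[0]=%.0f\n", D[0], k_reduction[0]);  // 8, 4
}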
cutlass/include/cutlass/gemm/kernel/gemm_with_k_reduction.h/0
{ "file_path": "cutlass/include/cutlass/gemm/kernel/gemm_with_k_reduction.h", "repo_id": "cutlass", "token_count": 10014 }
29
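The serial split-K path in the kernel above slices the K dimension into gemm_k_size chunks and serializes the epilogue with a per-output-tile semaphore: partition k waits until the counter reaches k, then releases k + 1, and the final partition resets the counter to 0 for the next launch. The host-side sketch below walks through that bookkeeping only; it omits the rounding of gemm_k_size to kSplitKAlignment and the actual atomic semaphore.

// Hypothetical walkthrough of serial split-K partitioning and sequencing; not CUTLASS code.
#include <cstdio>

int main() {
  const int problem_k = 1000;
  const int k_partitions = 4;
  const int gemm_k_size = (problem_k + k_partitions - 1) / k_partitions;  // per-partition K

  for (int k = 0; k < k_partitions; ++k) {
    int offset_k = k * gemm_k_size;
    int end_k = (k + 1 < k_partitions) ? (k + 1) * gemm_k_size : problem_k;

    int expected = k;                                     // value the semaphore must reach
    int release  = (k + 1 == k_partitions) ? 0 : k + 1;   // last partition resets to 0
    std::printf("partition %d: K [%d, %d), wait for %d, release %d\n",
                k, offset_k, end_k, expected, release);
  }
}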
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Templates exposing architecture support for multiply-add operations */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/tensor_ref.h" #include "cutlass/layout/matrix.h" #include "cutlass/arch/mma.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/thread/mma.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Gemplate that handles all packed matrix layouts template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Data type of A elements typename ElementA_, /// Layout of A matrix (concept: layout::MapFunc) typename LayoutA_, /// Data type of B elements typename ElementB_, /// Layout of B matrix (concept: layout::MapFunc) typename LayoutB_, /// Element type of C matrix typename ElementC_, /// Layout of C matrix (concept: layout::MapFunc) typename LayoutC_, /// Operator used to compute GEMM typename Operator_ > struct MmaGeneric { /// Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; /// Data type of operand A using ElementA = ElementA_; /// Layout of A matrix (concept: layout::MapFunc) using LayoutA = LayoutA_; /// Data type of operand B using ElementB = ElementB_; /// Layout of B matrix (concept: layout::MapFunc) using LayoutB = LayoutB_; /// Element type of operand C using ElementC = ElementC_; /// Layout of C matrix (concept: layout::MapFunc) using LayoutC = LayoutC_; /// Underlying mathematical operator using Operator = Operator_; /// A operand storage using FragmentA = Array<ElementA, Shape::kMK>; /// B operand storage using FragmentB = Array<ElementB, Shape::kKN>; /// C operand storage using FragmentC = Array<ElementC, Shape::kMN>; /// Instruction using MmaOp = arch::Mma< gemm::GemmShape<1,1,1>, 1, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, Operator>; static bool const kMultipleOf2 = ((Shape::kM % 2 == 0) && (Shape::kN % 2 == 0)); static bool const kAllFp32 = platform::is_same<ElementA, float>::value && platform::is_same<ElementB, float>::value && platform::is_same<ElementC, float>::value; // // Methods // /// Computes a matrix product D = A * B + C CUTLASS_HOST_DEVICE void operator()( FragmentC & D, FragmentA const & A, FragmentB const & B, FragmentC const & C) { TensorRef<ElementA const, LayoutA> a_ref( reinterpret_cast<ElementA const *>(&A), LayoutA::packed({Shape::kM, Shape::kK})); TensorRef<ElementB const, LayoutB> b_ref( reinterpret_cast<ElementB const *>(&B), LayoutB::packed({Shape::kK, Shape::kN})); TensorRef<ElementC, LayoutC> d_ref( reinterpret_cast<ElementC *>(&D), LayoutC::packed(make_Coord(Shape::kM, Shape::kN))); MmaOp mma_op; // Copy accumulators D = C; // Compute matrix product CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Shape::kK; ++k) { #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 860) if (kMultipleOf2 && kAllFp32) { //2x2 zigzag - m and n loops to increment by 2. Inner loop to process 4 multiply-adds in a 2x2 tile. CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Shape::kN; n+=2) { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < Shape::kM; m+=2) { int m_serpentine = (n % 4) ? 
(Shape::kM - 2 - m) : m; //top-left element in 2x2 tile { MatrixCoord mn(m_serpentine, n); MatrixCoord mk(m_serpentine, k); MatrixCoord kn(k, n); Array<ElementC, 1> d; Array<ElementA, 1> a; Array<ElementB, 1> b; d[0] = d_ref.at(mn); a[0] = a_ref.at(mk); b[0] = b_ref.at(kn); mma_op(d, a, b, d); d_ref.at(mn) = d[0]; } //bottom-left element in 2x2 tile { MatrixCoord mn(m_serpentine+1, n); MatrixCoord mk(m_serpentine+1, k); MatrixCoord kn(k, n); Array<ElementC, 1> d; Array<ElementA, 1> a; Array<ElementB, 1> b; d[0] = d_ref.at(mn); a[0] = a_ref.at(mk); b[0] = b_ref.at(kn); mma_op(d, a, b, d); d_ref.at(mn) = d[0]; } //bottom-right element in 2x2 tile { MatrixCoord mn(m_serpentine+1, n+1); MatrixCoord mk(m_serpentine+1, k); MatrixCoord kn(k, n+1); Array<ElementC, 1> d; Array<ElementA, 1> a; Array<ElementB, 1> b; d[0] = d_ref.at(mn); a[0] = a_ref.at(mk); b[0] = b_ref.at(kn); mma_op(d, a, b, d); d_ref.at(mn) = d[0]; } //top-right element in 2x2 tile { MatrixCoord mn(m_serpentine, n+1); MatrixCoord mk(m_serpentine, k); MatrixCoord kn(k, n+1); Array<ElementC, 1> d; Array<ElementA, 1> a; Array<ElementB, 1> b; d[0] = d_ref.at(mn); a[0] = a_ref.at(mk); b[0] = b_ref.at(kn); mma_op(d, a, b, d); d_ref.at(mn) = d[0]; } } } } else #endif { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Shape::kN; ++n) { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < Shape::kM; ++m) { int m_serpentine = (n % 2) ? (Shape::kM - 1 - m) : m; MatrixCoord mn(m_serpentine, n); MatrixCoord mk(m_serpentine, k); MatrixCoord kn(k, n); Array<ElementC, 1> d; Array<ElementA, 1> a; Array<ElementB, 1> b; d[0] = d_ref.at(mn); a[0] = a_ref.at(mk); b[0] = b_ref.at(kn); mma_op(d, a, b, d); d_ref.at(mn) = d[0]; } } } } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { /// Matrix multiply-add operation - assumes operand B is not changing struct MmaComplexF32_Column { using Shape = gemm::GemmShape<1, 1, 1>; using ElementC = complex<float>; CUTLASS_HOST_DEVICE void operator()( Array<complex<float>, 1> &d, Array<complex<float>, 1> const &a, Array<complex<float>, 1> const &b, Array<complex<float>, 1> const &c ) { d[0].real() = a[0].real() * b[0].real() + c[0].real(); d[0].imag() = a[0].real() * b[0].imag() + d[0].imag(); d[0].real() = -a[0].imag() * b[0].imag() + d[0].real(); d[0].imag() = a[0].imag() * b[0].real() + c[0].imag(); } }; /// Matrix multiply-add operation - assumes operand A is not changing struct MmaComplexF32_Corner { using Shape = gemm::GemmShape<1, 1, 1>; using ElementC = complex<float>; CUTLASS_HOST_DEVICE void operator()( Array<complex<float>, 1> &d, Array<complex<float>, 1> const &a, Array<complex<float>, 1> const &b, Array<complex<float>, 1> const &c ) { d[0].real() = -a[0].imag() * b[0].imag() + d[0].real(); d[0].imag() = a[0].real() * b[0].imag() + d[0].imag(); d[0].real() = a[0].real() * b[0].real() + c[0].real(); d[0].imag() = a[0].imag() * b[0].real() + c[0].imag(); } }; } // namespace detail ///////////////////////////////////////////////////////////////////////////////////////////////// /// Gemplate that handles all packed matrix layouts template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Layout of A matrix (concept: layout::MapFunc) typename LayoutA_, /// Layout of B matrix (concept: layout::MapFunc) typename LayoutB_, /// Layout of C matrix (concept: layout::MapFunc) typename LayoutC_ > struct MmaGeneric< Shape_, complex<float>, LayoutA_, complex<float>, LayoutB_, complex<float>, LayoutC_, arch::OpMultiplyAdd> { /// Size of 
the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; /// Data type of operand A using ElementA = complex<float>; /// Layout of A matrix (concept: layout::MapFunc) using LayoutA = LayoutA_; /// Data type of operand B using ElementB = complex<float>; /// Layout of B matrix (concept: layout::MapFunc) using LayoutB = LayoutB_; /// Element type of operand C using ElementC = complex<float>; /// Layout of C matrix (concept: layout::MapFunc) using LayoutC = LayoutC_; /// Underlying mathematical operator using Operator = arch::OpMultiplyAdd; /// A operand storage using FragmentA = Array<ElementA, Shape::kMK>; /// B operand storage using FragmentB = Array<ElementB, Shape::kKN>; /// C operand storage using FragmentC = Array<ElementC, Shape::kMN>; /// Instruction using MmaOp = arch::Mma< gemm::GemmShape<1,1,1>, 1, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, Operator>; // // Methods // /// Computes a matrix product D = A * B + C CUTLASS_HOST_DEVICE void operator()( FragmentC & D, FragmentA const & A, FragmentB const & B, FragmentC const & C) { TensorRef<ElementA const, LayoutA> a_ref( reinterpret_cast<ElementA const *>(&A), LayoutA::packed({Shape::kM, Shape::kK})); TensorRef<ElementB const, LayoutB> b_ref( reinterpret_cast<ElementB const *>(&B), LayoutB::packed({Shape::kK, Shape::kN})); TensorRef<ElementC, LayoutC> d_ref( reinterpret_cast<ElementC *>(&D), LayoutC::packed(make_Coord(Shape::kM, Shape::kN))); detail::MmaComplexF32_Column mma_column; detail::MmaComplexF32_Corner mma_corner; // Copy accumulators D = C; // Compute matrix product CUTLASS_PRAGMA_UNROLL for (int k = 0; k < Shape::kK; ++k) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < Shape::kN; ++n) { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < Shape::kM; ++m) { int m_serpentine = (n % 2) ? 
(Shape::kM - 1 - m) : m; MatrixCoord mn(m_serpentine, n); MatrixCoord mk(m_serpentine, k); MatrixCoord kn(k, n); Array<ElementC, 1> d; Array<ElementA, 1> a; Array<ElementB, 1> b; d[0] = d_ref.at(mn); a[0] = a_ref.at(mk); b[0] = b_ref.at(kn); if ((m == 0 && n) || m == Shape::kM - 1) { mma_corner(d, a, b, d); } else { mma_column(d, a, b, d); } d_ref.at(mn) = d[0]; } } } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Gemplate that handles conventional layouts for FFMA and DFMA GEMM template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Data type of A elements typename ElementA_, /// Layout of A matrix (concept: layout::MapFunc) typename LayoutA_, /// Data type of B elements typename ElementB_, /// Layout of B matrix (concept: layout::MapFunc) typename LayoutB_, /// Element type of C matrix typename ElementC_, /// Layout of C matrix (concept: layout::MapFunc) typename LayoutC_ > struct Mma< Shape_, ElementA_, LayoutA_, ElementB_, LayoutB_, ElementC_, LayoutC_, arch::OpMultiplyAdd, bool> { /// Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; /// Data type of operand A using ElementA = ElementA_; /// Layout of A matrix (concept: layout::MapFunc) using LayoutA = LayoutA_; /// Data type of operand B using ElementB = ElementB_; /// Layout of B matrix (concept: layout::MapFunc) using LayoutB = LayoutB_; /// Element type of operand C using ElementC = ElementC_; /// Layout of C matrix (concept: layout::MapFunc) using LayoutC = LayoutC_; /// Underlying mathematical operator using Operator = arch::OpMultiplyAdd; /// A operand storage using FragmentA = Array<ElementA, Shape::kMK>; /// B operand storage using FragmentB = Array<ElementB, Shape::kKN>; /// C operand storage using FragmentC = Array<ElementC, Shape::kMN>; /// Underlying matrix multiply operator (concept: arch::Mma) using ArchMmaOperator = typename MmaGeneric< Shape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, Operator>::MmaOp; // // Methods // /// Computes a matrix product D = A * B + C CUTLASS_HOST_DEVICE void operator()( FragmentC & D, FragmentA const & A, FragmentB const & B, FragmentC const & C) { MmaGeneric< Shape, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, Operator> mma; mma(D, A, B, C); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace thread } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
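/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Editor's addition (not part of mma_sm50.h): a minimal host-side usage sketch for the
// thread-level Mma specialization defined directly above. It assumes the specializations are
// reachable through "cutlass/gemm/thread/mma.h" and relies only on members shown in this
// file (FragmentA/B/C and operator()); the 4x4x2 shape and fill values are illustrative,
// not a recommended configuration.
//
#include "cutlass/gemm/thread/mma.h"
#include "cutlass/layout/matrix.h"

inline void thread_level_mma_sketch() {

  // One thread computes a 4x4x2 product element by element via MmaGeneric / scalar FMA.
  using Mma = cutlass::gemm::thread::Mma<
      cutlass::gemm::GemmShape<4, 4, 2>,
      float, cutlass::layout::ColumnMajor,
      float, cutlass::layout::RowMajor,
      float, cutlass::layout::ColumnMajor,
      cutlass::arch::OpMultiplyAdd>;

  typename Mma::FragmentA A;   // Shape::kMK elements of A held in registers
  typename Mma::FragmentB B;   // Shape::kKN elements of B held in registers
  typename Mma::FragmentC C;   // Shape::kMN accumulators
  typename Mma::FragmentC D;

  A.fill(1.0f);
  B.fill(2.0f);
  C.fill(0.5f);

  Mma mma;

  // D = A * B + C, traversed in the serpentine order shown above.
  mma(D, A, B, C);
}
/////////////////////////////////////////////////////////////////////////////////////////////////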
cutlass/include/cutlass/gemm/thread/mma_sm50.h/0
{ "file_path": "cutlass/include/cutlass/gemm/thread/mma_sm50.h", "repo_id": "cutlass", "token_count": 6595 }
30
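/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Editor's note on the detail::MmaComplexF32_Column / MmaComplexF32_Corner functors in
// mma_sm50.h above: as they are invoked in that file, the destination fragment `d` is also
// passed as the accumulator, so mid-sequence reads of `d` observe the partially updated
// value. Under that aliasing, either ordering appears to reduce to the ordinary complex
// multiply-accumulate written out below, with the two orderings differing only in which
// operand's products are issued first (to favor register reuse when B or A is loop
// invariant). This is the editor's reading of the excerpt, offered as a reference
// formulation rather than a statement about the intended instruction scheduling.
//
#include "cutlass/complex.h"

inline cutlass::complex<float> complex_multiply_add_reference(
    cutlass::complex<float> const &a,
    cutlass::complex<float> const &b,
    cutlass::complex<float> const &c) {

  // d = a * b + c, written out over real and imaginary parts.
  return cutlass::complex<float>(
      a.real() * b.real() - a.imag() * b.imag() + c.real(),
      a.real() * b.imag() + a.imag() * b.real() + c.imag());
}
/////////////////////////////////////////////////////////////////////////////////////////////////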
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a double-buffered threadblock-scoped GEMM kernel. It loads two loop invariant vectors, mean and var, in the prologue and stores them in the register file. In the mainloop, it loads two loop variant vectors, gamma and beta, by using cp.async. We will call elementwise operation to apply var, mean, gamma, beta between ldmatrix and warp mma. */ #pragma once #include "cutlass/aligned_buffer.h" #include "cutlass/arch/memory.h" #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/transform/threadblock/predicated_scale_bias_vector_iterator.h" #include "cutlass/gemm/threadblock/mma_base.h" #include "cutlass/gemm/warp/layernorm_scale_bias_transform.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Element type of scale and bias vectors typename ElementScaleBias_, /// Layout of scale and bias vectors typename LayoutScaleBias_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// WarpIterator to load Scale or Bias vector from the shared memory typename WarpIteratorGammaBeta_, /// Number of stages, int Stages, /// Used for partial specialization typename Enable = bool> class MmaMainloopFusionBase { public: ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; ///< Element type of scale and bias vectors using ElementScaleBias = ElementScaleBias_; /// Layout of scale and bias vectors using LayoutScaleBias = LayoutScaleBias_; ///< Policy describing tuning details using Policy = Policy_; ///< WarpIterator to load Scale or Bias vector from the shared memory using WarpIteratorGammaBeta = WarpIteratorGammaBeta_; // // Dependent types // /// Warp-level Mma using Operator = typename Policy::Operator; /// Shape describing the overall GEMM computed from shared memory /// by each warp. using WarpGemm = typename Policy::Operator::Shape; /// Shape describing the number of warps filling the CTA using WarpCount = cutlass::gemm::GemmShape<Shape::kM / WarpGemm::kM, Shape::kN / WarpGemm::kN, Shape::kK / WarpGemm::kK>; /// Number of warp-level GEMM oeprations static int const kWarpGemmIterations = (WarpGemm::kK / Operator::Policy::MmaShape::kK); /// Number of stages static int const kStages = Stages; /// Tensor reference to the A operand using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>; /// Tensor reference to the scale and bias vectors using TensorRefGammaBeta = TensorRef<ElementScaleBias, LayoutScaleBias>; /// Tensor reference to the B operand using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>; // // Nested structs // /// Shared storage object needed by threadblock-scoped GEMM class SharedStorage { public: // // Type definitions // /// Shape of the A matrix operand in shared memory using ShapeA = MatrixShape<Shape::kM + Policy::SmemPaddingA::kRow, Shape::kK * kStages + Policy::SmemPaddingA::kColumn>; /// Shape of the A scale and bias vectors in shared memory using ShapeGammaBeta = MatrixShape<1 + Policy::SmemPaddingA::kRow, 2 * Shape::kK * kStages + Policy::SmemPaddingA::kColumn>; /// Shape of the B matrix operand in shared memory using ShapeB = MatrixShape<Shape::kK * kStages + Policy::SmemPaddingB::kRow, Shape::kN + Policy::SmemPaddingB::kColumn>; public: // // Data members // /// Buffer for A operand AlignedBuffer<typename Operator::ElementA, ShapeA::kCount> operand_A; /// Buffer for B operand AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B; /// Buffer for A operand Scale and Bias AlignedBuffer<ElementScaleBias, ShapeGammaBeta::kCount> operand_A_gamma_beta; public: // // Methods // /// Returns a layout object for the A matrix CUTLASS_DEVICE static typename Operator::LayoutA LayoutA() { return Operator::LayoutA::packed({ShapeA::kRow, ShapeA::kColumn}); } /// Returns a layout object for the B matrix CUTLASS_HOST_DEVICE static typename Operator::LayoutB LayoutB() { return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn}); } /// Returns a layout object for the A scale and bias vectors CUTLASS_DEVICE static LayoutScaleBias LayoutScaleBias() { return LayoutScaleBias::packed( {ShapeGammaBeta::kRow, ShapeGammaBeta::kColumn}); } /// Returns a TensorRef to the A 
operand CUTLASS_HOST_DEVICE TensorRefA operand_A_ref() { return TensorRefA{operand_A.data(), LayoutA()}; } /// Returns a TensorRef to the B operand CUTLASS_HOST_DEVICE TensorRefB operand_B_ref() { return TensorRefB{operand_B.data(), LayoutB()}; } /// Returns a TensorRef to the A operand Scale vector CUTLASS_HOST_DEVICE TensorRefGammaBeta operand_A_gamma_beta_ref() { return TensorRefGammaBeta{operand_A_gamma_beta.data(), LayoutScaleBias()}; } }; protected: // // Data members // /// Iterator to load a warp-scoped tile of A operand from shared memory typename Operator::IteratorA warp_tile_iterator_A_; /// Iterator to load a warp-scoped tile of A operand scale and bias vector /// from shared memory WarpIteratorGammaBeta warp_tile_iterator_A_gamma_beta_; /// Iterator to load a warp-scoped tile of B operand from shared memory typename Operator::IteratorB warp_tile_iterator_B_; public: /// Construct from tensor references CUTLASS_DEVICE MmaMainloopFusionBase( ///< Shared storage needed for internal use by threadblock-scoped GEMM SharedStorage &shared_storage, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx) : warp_tile_iterator_A_(shared_storage.operand_A_ref(), lane_idx), warp_tile_iterator_A_gamma_beta_( shared_storage.operand_A_gamma_beta_ref(), lane_idx), warp_tile_iterator_B_(shared_storage.operand_B_ref(), lane_idx) {} }; /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Iterates over tiles of A operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorA_, /// Iterates over tiles of A operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorA_, /// Cache operation for operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB_, /// Cache operation for operand B cutlass::arch::CacheOperation::Kind CacheOpB, /// Iterates over vectors of var and mean vector in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorVarMean_, /// Iterates over vectors of scale and bias vector in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorGammaBeta_, /// Iterates over vectors of scale and bias vector in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorGammaBeta_, /// Cache operation for scale/bias operand cutlass::arch::CacheOperation::Kind CacheOpGammaBeta, /// Data type of accumulator matrix typename ElementC_, /// Data type of accumulator matrix typename LayoutC_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy_, /// WarpIterator to load Scale or Bias vector from the shared memory typename WarpIteratorGammaBeta_, /// Number of stages, int Stages, /// Use zfill or predicate for out-of-bound cp.async SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone, /// Used for partial specialization typename Enable = bool> class MmaLayernormMainloopFusionMultistage : public 
MmaMainloopFusionBase<Shape_, typename IteratorGammaBeta_::Element, typename IteratorGammaBeta_::Layout, Policy_, WarpIteratorGammaBeta_, Stages> { public: ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; ///< Iterates over tiles of A operand in global memory using IteratorA = IteratorA_; ///< Iterates over tiles of B operand in global memory using IteratorB = IteratorB_; ///< Iterates over tiles of the var and mean vectors in global memory using IteratorVarMean = IteratorVarMean_; ///< Iterates over tiles of the scale and bias vectors in global memory using IteratorGammaBeta = IteratorGammaBeta_; ///< WarpIterator to load Scale or Bias vector from the shared memory using WarpIteratorGammaBeta = WarpIteratorGammaBeta_; ///< Policy describing tuning details using Policy = Policy_; ///< Base class using Base = MmaMainloopFusionBase<Shape_, typename IteratorGammaBeta::Element, typename IteratorGammaBeta::Layout, Policy, WarpIteratorGammaBeta, Stages>; ///< Data type of accumulator matrix using ElementC = ElementC_; ///< Layout of accumulator matrix using LayoutC = LayoutC_; using SmemIteratorA = SmemIteratorA_; using SmemIteratorB = SmemIteratorB_; using SmemIteratorGammaBeta = SmemIteratorGammaBeta_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; static cutlass::arch::CacheOperation::Kind const kCacheOpGammaBeta = CacheOpGammaBeta; // // Dependent types // /// Fragment of accumulator tile using FragmentC = typename Policy::Operator::FragmentC; /// Warp-level Mma using Operator = typename Policy::Operator; /// Minimum architecture is Sm80 to support cp.async using ArchTag = arch::Sm80; /// Complex transform on A operand static ComplexTransform const kTransformA = Operator::kTransformA; /// Complex transform on B operand static ComplexTransform const kTransformB = Operator::kTransformB; /// Internal structure exposed for introspection. 
struct Detail { static_assert(Base::kWarpGemmIterations > 1, "The pipelined structure requires at least two warp-level " "GEMM operations."); /// Number of cp.async instructions to load one stage of operand A static int const AsyncCopyIterationsPerStageA = IteratorA::ThreadMap::Iterations::kCount; /// Number of cp.async instructions to load one stage of operand B static int const AsyncCopyIterationsPerStageB = IteratorB::ThreadMap::Iterations::kCount; /// Number of stages static int const kStages = Stages; /// Number of cp.async instructions to load on group of operand A static int const kAccessesPerGroupA = (AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; /// Number of cp.async instructions to load on group of operand B static int const kAccessesPerGroupB = (AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; }; private: using WarpLoadedFragmentA = typename Operator::FragmentA; using WarpLoadedFragmentB = typename Operator::FragmentB; using WarpTransformedFragmentA = typename Operator::TransformedFragmentA; using WarpTransformedFragmentB = typename Operator::TransformedFragmentB; using WarpLoadedFragmentVarMean = typename IteratorVarMean::Fragment; using WarpLoadedFragmentGammaBeta = typename WarpIteratorGammaBeta::Fragment; private: // // Data members // /// Iterator to write threadblock-scoped tile of A operand to shared memory SmemIteratorA smem_iterator_A_; /// Iterator to write threadblock-scoped tile of A operand scale vector to shared memory SmemIteratorGammaBeta smem_iterator_A_gamma_beta_; /// Iterator to write threadblock-scoped tile of B operand to shared memory SmemIteratorB smem_iterator_B_; int warp_idx_m_; int warp_idx_n_; public: /// Construct from tensor references CUTLASS_DEVICE MmaLayernormMainloopFusionMultistage( ///< Shared storage needed for internal use by threadblock-scoped GEMM typename Base::SharedStorage &shared_storage, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx ): Base(shared_storage, thread_idx, warp_idx, lane_idx), smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), smem_iterator_A_gamma_beta_(shared_storage.operand_A_gamma_beta_ref(), thread_idx), smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) { // Compute warp location within threadblock tile by mapping the warp_id to // three coordinates: // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); warp_idx_m_ = warp_idx_mn % Base::WarpCount::kM; warp_idx_n_ = warp_idx_mn / Base::WarpCount::kM; // Add per-warp offsets in units of warp-level tiles this->warp_tile_iterator_A_.add_tile_offset( {warp_idx_m_, Base::kWarpGemmIterations * warp_idx_k}); this->warp_tile_iterator_A_gamma_beta_.add_tile_offset( {warp_idx_m_, Base::kWarpGemmIterations * warp_idx_k}); this->warp_tile_iterator_B_.add_tile_offset( {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n_}); } CUTLASS_DEVICE void copy_tiles_and_advance(IteratorA &iterator_A, IteratorGammaBeta &iterator_A_gamma_beta, IteratorB &iterator_B, int group_start_A = 0, int group_start_B = 0) { iterator_A.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector); 
this->smem_iterator_A_.set_iteration_index(group_start_A); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) { if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) { typename IteratorA::AccessType *dst_ptr = reinterpret_cast<typename IteratorA::AccessType *>( this->smem_iterator_A_.get()); int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { auto gmem_ptr = iterator_A.get(); if (SharedMemoryClear == SharedMemoryClearOption::kZfill) { cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>( dst_ptr + v, gmem_ptr, iterator_A.valid()); } else { cutlass::arch::cp_async<kSrcBytes, kCacheOpA>( dst_ptr + v, gmem_ptr, iterator_A.valid()); } ++iterator_A; } ++this->smem_iterator_A_; } } // Async Copy for operand A scale and bias vector. Scale and bias vectors // are small. One iteration is enough. if (group_start_A == 0) { typename IteratorGammaBeta::AccessType *dst_ptr = reinterpret_cast<typename IteratorGammaBeta::AccessType *>( this->smem_iterator_A_gamma_beta_.get()); int const kSrcBytes = sizeof_bits<typename IteratorGammaBeta::Element>::value * IteratorGammaBeta::kElementsPerAccess / 8; cutlass::arch::cp_async<kSrcBytes, kCacheOpGammaBeta>( dst_ptr, iterator_A_gamma_beta.get(), iterator_A_gamma_beta.valid()); } iterator_B.set_iteration_index(group_start_B * IteratorB::kAccessesPerVector); this->smem_iterator_B_.set_iteration_index(group_start_B); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) { if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) { typename IteratorB::AccessType *dst_ptr = reinterpret_cast<typename IteratorB::AccessType *>( this->smem_iterator_B_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value * IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { auto gmem_ptr = iterator_B.get(); if (SharedMemoryClear == SharedMemoryClearOption::kZfill) { cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>( dst_ptr + v, gmem_ptr, iterator_B.valid()); } else { cutlass::arch::cp_async<kSrcBytes, kCacheOpB>( dst_ptr + v, gmem_ptr, iterator_B.valid()); } ++iterator_B; } ++this->smem_iterator_B_; } } } /// Perform a threadblock-scoped matrix multiply-accumulate CUTLASS_DEVICE void operator()( ///< problem size of GEMM int gemm_k_iterations, ///< destination accumulator tile FragmentC &accum, ///< iterator over A operand in global memory IteratorA iterator_A, ///< iterator over B operand in global memory IteratorB iterator_B, ///< iterator over B operand in global memory IteratorVarMean iterator_var_mean, ///< iterator over scale and bias vectors in global memory IteratorGammaBeta iterator_A_gamma_beta, ///< initial value of accumulator FragmentC const &src_accum) { // // Prologue // // Issue several complete stages WarpLoadedFragmentVarMean warp_loaded_frag_var_mean; iterator_var_mean.add_tile_offset({0, warp_idx_m_}); iterator_var_mean.load(warp_loaded_frag_var_mean); CUTLASS_PRAGMA_UNROLL for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations) { iterator_A.clear_mask(gemm_k_iterations == 0); iterator_A_gamma_beta.clear_mask(gemm_k_iterations == 0); iterator_B.clear_mask(gemm_k_iterations == 0); iterator_A.set_iteration_index(0); 
this->smem_iterator_A_.set_iteration_index(0); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) { typename IteratorA::AccessType *dst_ptr = reinterpret_cast<typename IteratorA::AccessType *>( this->smem_iterator_A_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8; int src_bytes = (iterator_A.valid() ? kSrcBytes : 0); cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>( dst_ptr + v, iterator_A.get(), iterator_A.valid()); ++iterator_A; } ++this->smem_iterator_A_; } // Async Copy for operand A scale and bias vectors. Scale and bias // vectors are small. One iteration is enough. { typename IteratorGammaBeta::AccessType *dst_ptr = reinterpret_cast<typename IteratorGammaBeta::AccessType *>( this->smem_iterator_A_gamma_beta_.get()); int const kSrcBytes = sizeof_bits<typename IteratorGammaBeta::Element>::value * IteratorGammaBeta::kElementsPerAccess / 8; cutlass::arch::cp_async<kSrcBytes, kCacheOpGammaBeta>( dst_ptr, iterator_A_gamma_beta.get(), iterator_A_gamma_beta.valid()); } iterator_B.set_iteration_index(0); this->smem_iterator_B_.set_iteration_index(0); // Async Copy for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { typename IteratorB::AccessType *dst_ptr = reinterpret_cast<typename IteratorB::AccessType *>( this->smem_iterator_B_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value * IteratorB::ThreadMap::kElementsPerAccess / IteratorB::kAccessesPerVector / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>( dst_ptr + v, iterator_B.get(), iterator_B.valid()); ++iterator_B; } ++this->smem_iterator_B_; } // Move to the next stage iterator_A.add_tile_offset({0, 1}); iterator_A_gamma_beta.add_tile_offset({0, 1}); iterator_B.add_tile_offset({1, 0}); this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_A_gamma_beta_.add_tile_offset({0, 1}); this->smem_iterator_B_.add_tile_offset({1, 0}); // Defines the boundary of a stage of cp.async. cutlass::arch::cp_async_fence(); } // Perform accumulation in the 'd' output operand accum = src_accum; // Waits until kStages-2 stages have committed. 
cutlass::arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Pair of fragments used to overlap shared memory loads and math // instructions WarpLoadedFragmentA warp_loaded_frag_A[2]; WarpLoadedFragmentB warp_loaded_frag_B[2]; WarpLoadedFragmentGammaBeta warp_loaded_frag_A_gamma_beta[2]; WarpTransformedFragmentA warp_transformed_frag_A[2]; WarpTransformedFragmentB warp_transformed_frag_B[2]; Operator warp_mma; cutlass::gemm::warp::LayernormScaleBiasTransform<WarpTransformedFragmentA, WarpLoadedFragmentVarMean, WarpLoadedFragmentGammaBeta> elementwise_transform; this->warp_tile_iterator_A_.set_kgroup_index(0); this->warp_tile_iterator_A_gamma_beta_.set_kgroup_index(0); this->warp_tile_iterator_B_.set_kgroup_index(0); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]); this->warp_tile_iterator_A_gamma_beta_.load( warp_loaded_frag_A_gamma_beta[0]); this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_A_gamma_beta_; ++this->warp_tile_iterator_B_; iterator_A.clear_mask(gemm_k_iterations == 0); iterator_A_gamma_beta.clear_mask(gemm_k_iterations == 0); iterator_B.clear_mask(gemm_k_iterations == 0); int smem_write_stage_idx = Base::kStages - 1; int smem_read_stage_idx = 0; warp_mma.transform(warp_transformed_frag_A[0], warp_transformed_frag_B[0], warp_loaded_frag_A[0], warp_loaded_frag_B[0]); elementwise_transform(warp_transformed_frag_A[0], warp_loaded_frag_var_mean, warp_loaded_frag_A_gamma_beta[0]); // // Mainloop // CUTLASS_GEMM_LOOP for (; gemm_k_iterations > (-Base::kStages + 1);) { // // Loop over GEMM K dimension // // Computes a warp-level GEMM on data held in shared memory // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) { // Load warp-level tiles from shared memory, wrapping to k offset if // this is the last group as the case may be. 
this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_A_gamma_beta_.set_kgroup_index( (warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]); this->warp_tile_iterator_A_gamma_beta_.load( warp_loaded_frag_A_gamma_beta[(warp_mma_k + 1) % 2]); this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_A_gamma_beta_; ++this->warp_tile_iterator_B_; if (warp_mma_k > 0) { warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % 2], warp_loaded_frag_A[warp_mma_k % 2], warp_loaded_frag_B[warp_mma_k % 2]); elementwise_transform(warp_transformed_frag_A[warp_mma_k % 2], warp_loaded_frag_var_mean, warp_loaded_frag_A_gamma_beta[warp_mma_k % 2]); } warp_mma( accum, warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B[warp_mma_k % 2], accum ); // Issue global->shared copies for the this stage if (warp_mma_k < Base::kWarpGemmIterations - 1) { int group_start_iteration_A, group_start_iteration_B; group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA; group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB; copy_tiles_and_advance(iterator_A, iterator_A_gamma_beta, iterator_B, group_start_iteration_A, group_start_iteration_B); } if (warp_mma_k + 2 == Base::kWarpGemmIterations) { int group_start_iteration_A, group_start_iteration_B; group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA; group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB; copy_tiles_and_advance(iterator_A, iterator_A_gamma_beta, iterator_B, group_start_iteration_A, group_start_iteration_B); // Inserts a memory fence between stages of cp.async instructions. cutlass::arch::cp_async_fence(); // Waits until kStages-2 stages have committed. 
arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Move to the next stage iterator_A.add_tile_offset({0, 1}); iterator_A_gamma_beta.add_tile_offset({0, 1}); iterator_B.add_tile_offset({1, 0}); this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_A_gamma_beta_.add_tile_offset({0, 1}); this->smem_iterator_B_.add_tile_offset({1, 0}); // Add negative offsets to return iterators to the 'start' of the // circular buffer in shared memory if (smem_write_stage_idx == (Base::kStages - 1)) { this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); this->smem_iterator_A_gamma_beta_.add_tile_offset({0, -Base::kStages}); this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); smem_write_stage_idx = 0; } else { ++smem_write_stage_idx; } if (smem_read_stage_idx == (Base::kStages - 1)) { this->warp_tile_iterator_A_.add_tile_offset( {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations}); this->warp_tile_iterator_A_gamma_beta_.add_tile_offset( {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations}); this->warp_tile_iterator_B_.add_tile_offset( {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0}); smem_read_stage_idx = 0; } else { ++smem_read_stage_idx; } --gemm_k_iterations; iterator_A.clear_mask(gemm_k_iterations == 0); iterator_A_gamma_beta.clear_mask(gemm_k_iterations == 0); iterator_B.clear_mask(gemm_k_iterations == 0); } // Do any conversions feeding the first stage at the end of the loop so // we can start right away on mma instructions if (warp_mma_k + 1 == Base::kWarpGemmIterations) { warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2], warp_transformed_frag_B[(warp_mma_k + 1) % 2], warp_loaded_frag_A[(warp_mma_k + 1) % 2], warp_loaded_frag_B[(warp_mma_k + 1) % 2]); elementwise_transform( warp_transformed_frag_A[(warp_mma_k + 1) % 2], warp_loaded_frag_var_mean, warp_loaded_frag_A_gamma_beta[(warp_mma_k + 1) % 2]); } } } // commit and drain all pending and predicated cp.async pnz from the GEMM mainloop cutlass::arch::cp_async_fence(); cutlass::arch::cp_async_wait<0>(); __syncthreads(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
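/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Editor's addition (not part of this header): a scalar reference for the fusion point
// described above. The mean/variance vector is loop invariant and is loaded once in the
// prologue, while gamma/beta vary along K and are staged through shared memory with
// cp.async; between the shared-memory load and the warp-level MMA, each element of the A
// fragment is normalized and scaled. The exact fragment packing is owned by
// cutlass::gemm::warp::LayernormScaleBiasTransform; the function below is only a
// hypothetical per-element formulation, assuming the variance has already been folded into
// an inverse standard deviation.
//
inline float layernorm_scale_bias_reference(
    float x,          // one element of the A operand fragment
    float mean,       // loop-invariant row mean (loaded in the prologue)
    float inv_std,    // loop-invariant 1 / sqrt(var + eps)
    float gamma,      // loop-variant scale, staged via cp.async
    float beta) {     // loop-variant shift, staged via cp.async

  return (x - mean) * inv_std * gamma + beta;
}
/////////////////////////////////////////////////////////////////////////////////////////////////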
cutlass/include/cutlass/gemm/threadblock/mma_layernorm_mainloop_fusion_multistage.h/0
{ "file_path": "cutlass/include/cutlass/gemm/threadblock/mma_layernorm_mainloop_fusion_multistage.h", "repo_id": "cutlass", "token_count": 13560 }
31
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default warp-level GEMM operators selected by data type, size, and layouts of operands. */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/numeric_types.h" #include "cutlass/arch/mma.h" #include "cutlass/gemm/warp/mma_tensor_op.h" #include "cutlass/gemm/warp/mma_mixed_input_tensor_op.h" #include "cutlass/gemm/warp/mma_tensor_op_fast_f32.h" #include "cutlass/gemm/warp/default_mma_tensor_op.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial Specialization - inputs and output types are float - uses BF16 internally template < /// Shape of one matrix production operation (concept: GemmShape) typename WarpShape_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Number of partitions along K dimension int PartitionsK, /// Store the accumulators in row major or column major. Row major is used /// when output layout is interleaved. 
bool AccumulatorsInRowMajor> struct DefaultMmaTensorOp< WarpShape_, GemmShape<16, 8, 8>, float, LayoutA, float, LayoutB, float, LayoutC, arch::OpMultiplyAddFastBF16, PartitionsK, AccumulatorsInRowMajor> { // Uses BF16 internally using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< GemmShape<16, 8, 8>, 32, bfloat16_t, cutlass::layout::RowMajor, bfloat16_t, cutlass::layout::ColumnMajor, float, cutlass::layout::RowMajor, arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::MmaTensorOp< WarpShape_, float, LayoutA, float, LayoutB, float, LayoutC, Policy, PartitionsK, AccumulatorsInRowMajor>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial Specialization - inputs and output types are float - uses F16 internally template < /// Shape of one matrix production operation (concept: GemmShape) typename WarpShape_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Number of partitions along K dimension int PartitionsK, /// Store the accumulators in row major or column major. Row major is used /// when output layout is interleaved. bool AccumulatorsInRowMajor> struct DefaultMmaTensorOp< WarpShape_, GemmShape<16, 8, 8>, float, LayoutA, float, LayoutB, float, LayoutC, arch::OpMultiplyAddFastF16, PartitionsK, AccumulatorsInRowMajor> { // Uses F16 internally using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< GemmShape<16, 8, 8>, 32, half_t, cutlass::layout::RowMajor, half_t, cutlass::layout::ColumnMajor, float, cutlass::layout::RowMajor, arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::MmaTensorOp< WarpShape_, float, LayoutA, float, LayoutB, float, LayoutC, Policy, PartitionsK, AccumulatorsInRowMajor>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial Specialization - inputs and output types are float - uses TF32 internally template < /// Shape of one matrix production operation (concept: GemmShape) typename WarpShape_, /// Shape of target matrix multiply instruction (concept: GemmShape) typename InstructionShape_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Number of partitions along K dimension int PartitionsK, /// Store the accumulators in row major or column major. Row major is used /// when output layout is interleaved. 
bool AccumulatorsInRowMajor> struct DefaultMmaTensorOp< WarpShape_, InstructionShape_, float, LayoutA, float, LayoutB, float, LayoutC, arch::OpMultiplyAdd, PartitionsK, AccumulatorsInRowMajor> { // Uses TF32 internally using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< InstructionShape_, 32, tfloat32_t, cutlass::layout::RowMajor, tfloat32_t, cutlass::layout::ColumnMajor, float, cutlass::layout::RowMajor, arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::MmaTensorOp< WarpShape_, float, LayoutA, float, LayoutB, float, LayoutC, Policy, PartitionsK, AccumulatorsInRowMajor>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial Specialization - inputs and output types are float - uses TF32 for Fast Accurate FP32 template < /// Shape of one matrix production operation (concept: GemmShape) typename WarpShape_, /// Shape of target matrix multiply instruction (concept: GemmShape) typename InstructionShape_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Number of partitions along K dimension int PartitionsK, /// Store the accumulators in row major or column major. Row major is used /// when output layout is interleaved. bool AccumulatorsInRowMajor> struct DefaultMmaTensorOp< WarpShape_, InstructionShape_, float, LayoutA, float, LayoutB, float, LayoutC, arch::OpMultiplyAddFastF32, PartitionsK, AccumulatorsInRowMajor> { // Uses TF32 internally using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< InstructionShape_, 32, cutlass::tfloat32_t, cutlass::layout::RowMajor, cutlass::tfloat32_t, cutlass::layout::ColumnMajor, float, cutlass::layout::RowMajor, arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::MmaTensorOpFastF32< WarpShape_, float, LayoutA, float, LayoutB, float, LayoutC, Policy, PartitionsK, AccumulatorsInRowMajor>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Partial Specialization - inputs are mixed types - uses wider datatype internally. /// (e.g. F16 <= F16 x S8 + F16, F16 <= BF16 x S8 + F32) template < /// Shape of one matrix production operation (concept: GemmShape) typename WarpShape_, /// Element type of A matrix typename ElementA, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA, /// Element type of B matrix typename ElementB, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB, /// Element type of C matrix typename ElementC, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC, /// Number of partitions along K dimension int PartitionsK, /// Store the accumulators in row major or column major. Row major is used /// when output layout is interleaved. 
bool AccumulatorsInRowMajor> struct DefaultMmaTensorOp< WarpShape_, GemmShape<16, 8, 16>, // InstructionShape ElementA, // Element type of A matrix in Global Memory LayoutA, // Layout of A matrix in Global Memory ElementB, // Element type of B matrix in Global Memory LayoutB, // Layout of B matrix in Global Memory ElementC, // Element type of C matrix in Global Memory LayoutC, // Layout of C matrix in Global Memory arch::OpMultiplyAddMixedInputUpcast, // Tag to indicate mixed-input datatype, where narrower datatype is upcasted to wider datatype PartitionsK, AccumulatorsInRowMajor> { // Check if the ElementA and ElementB are of different data types static_assert(!platform::is_same<ElementA, ElementB>::value, "DefaultMmaTensorOp with arch::OpMultiplyAddMixedInputUpcast ElementA and ElementB cannot be of the same data type"); // Data type used for internal computation - use the wider of the two data types for mma.sync operands using ElementOperand = typename platform::conditional<(sizeof(ElementA) > sizeof(ElementB)), ElementA, ElementB>::type; // Operand datatypes in the internal MMA instruction - use the wider of the two data types using ElementAMma = ElementOperand; using ElementBMma = ElementOperand; using MmaElementC = ElementC; // Uses using Policy = cutlass::gemm::warp::MmaTensorOpPolicy< cutlass::arch::Mma< GemmShape<16, 8, 16>, 32, ElementAMma, cutlass::layout::RowMajor, ElementBMma, cutlass::layout::ColumnMajor, MmaElementC, cutlass::layout::RowMajor, arch::OpMultiplyAdd >, cutlass::MatrixShape<1, 1> >; // Define the warp-level tensor op using Type = cutlass::gemm::warp::MmaMixedInputTensorOp< WarpShape_, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, Policy, PartitionsK, AccumulatorsInRowMajor>; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace gemm } // namespace cutlass ///////////////////////////////////////////////////////////////////////////////////////////////// #include "cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h" /////////////////////////////////////////////////////////////////////////////////////////////////
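/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Editor's addition (not part of this header): a compile-time sketch of how the F32-in /
// F32-out specialization above selects TF32 tensor core instructions. Only the
// instruction-level Policy is inspected; the warp and instruction shapes are illustrative,
// and the plain row/column-major layouts are placeholders (real warp-level operators are
// handed the tensor-op shared-memory layouts by the threadblock-level defaults).
//
#include "cutlass/gemm/warp/default_mma_tensor_op_sm80.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/platform/platform.h"

namespace example_tf32 {

using WarpTraits = cutlass::gemm::warp::DefaultMmaTensorOp<
    cutlass::gemm::GemmShape<64, 64, 16>,      // warp-level tile
    cutlass::gemm::GemmShape<16, 8, 8>,        // mma.sync instruction shape
    float, cutlass::layout::RowMajor,          // A (placeholder layout)
    float, cutlass::layout::ColumnMajor,       // B (placeholder layout)
    float, cutlass::layout::RowMajor,          // C
    cutlass::arch::OpMultiplyAdd>;

// Although the GEMM interface is F32, the selected mma.sync operands are TF32.
static_assert(cutlass::platform::is_same<
                  WarpTraits::Policy::Operator::ElementA,
                  cutlass::tfloat32_t>::value,
              "F32 operands are presented to the tensor cores as TF32");

} // namespace example_tf32
/////////////////////////////////////////////////////////////////////////////////////////////////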
cutlass/include/cutlass/gemm/warp/default_mma_tensor_op_sm80.h/0
{ "file_path": "cutlass/include/cutlass/gemm/warp/default_mma_tensor_op_sm80.h", "repo_id": "cutlass", "token_count": 4025 }
32
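/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Editor's addition: a compile-time illustration of the operand-width rule used by the
// arch::OpMultiplyAddMixedInputUpcast specialization in default_mma_tensor_op_sm80.h above.
// The F16 x S8 pairing is a hypothetical example; the rule simply keeps the wider of the
// two input types as the internal mma.sync operand type.
//
#include <cstdint>
#include "cutlass/numeric_types.h"
#include "cutlass/platform/platform.h"

namespace example_mixed_input {

using ElementA = cutlass::half_t;   // 16-bit operand
using ElementB = std::int8_t;       // 8-bit operand

using ElementOperand = cutlass::platform::conditional<
    (sizeof(ElementA) > sizeof(ElementB)), ElementA, ElementB>::type;

static_assert(cutlass::platform::is_same<ElementOperand, cutlass::half_t>::value,
              "The narrower S8 operand is upcast to F16 before the tensor core instruction");

} // namespace example_mixed_input
/////////////////////////////////////////////////////////////////////////////////////////////////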
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing warp-level matrix multiply-accumulate operations targeting Tensor Cores. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/platform/platform.h" #include "cutlass/numeric_conversion.h" #include "cutlass/numeric_types.h" #include "cutlass/matrix_shape.h" #include "cutlass/arch/memory_sm75.h" #include "cutlass/arch/mma_sm75.h" #include "cutlass/arch/mma_sm80.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/warp/mma.h" #include "cutlass/gemm/warp/mma_tensor_op_policy.h" #include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h" #include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace warp { ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { template <typename T, typename S, int N, FloatRoundStyle Round> struct ConvertAndPack { using Converter = NumericArrayConverter<T, S, N, Round>; CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<S, N> const &source) { Converter converter; return converter(source); } }; template <typename T, int N, FloatRoundStyle Round> struct ConvertAndPack<T, T, N, Round> { CUTLASS_HOST_DEVICE Array<T, N> operator()(Array<T, N> const &source) { return source; } }; template <int N, FloatRoundStyle Round> struct ConvertAndPack<bfloat16_t, float, N, Round> { using Converter = NumericArrayConverter<bfloat16_t, float, N, Round>; CUTLASS_HOST_DEVICE Array<bfloat16_t, N> operator()(Array<float, N> const &source) { Converter converter; Array<float, N> tmp; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { int idx = (((i << 1) & 2) | ((i >> 1) & 1) | (i & 0xfffffffc)); tmp[i] = source[idx]; } return converter(tmp); } }; template <int N, FloatRoundStyle Round> struct ConvertAndPack<half_t, float, N, Round> { using Converter = NumericArrayConverter<half_t, float, N, Round>; CUTLASS_HOST_DEVICE Array<half_t, N> operator()(Array<float, N> const &source) { Converter converter; Array<float, N> tmp; CUTLASS_PRAGMA_UNROLL for (int i = 0; i < N; ++i) { int idx = (((i << 1) & 2) | ((i >> 1) & 1) | (i & 0xfffffffc)); tmp[i] = source[idx]; } return converter(tmp); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace detail ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math instructions. template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Data type of A elements typename ElementA_, /// Layout of A matrix (concept: MatrixLayout) typename LayoutA_, /// Data type of B elements typename ElementB_, /// Layout of B matrix (concept: MatrixLayout) typename LayoutB_, /// Element type of C matrix typename ElementC_, /// Layout of C matrix (concept: MatrixLayout) typename LayoutC_, /// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy) typename Policy_, /// Number of partitions along K dimension int PartitionsK_ = 1, /// Store the accumulators in row major or column major. Row major is used /// when output layout is interleaved. 
bool AccumulatorsInRowMajor = false, /// Used for partial specialization typename Enable = bool > class MmaTensorOp { public: /// Shape of warp-level matrix operation (concept: GemmShape) using Shape = Shape_; /// Data type of multiplicand A using ElementA = ElementA_; /// Layout of multiplicand A using LayoutA = LayoutA_; /// Data type of multiplicand B using ElementB = ElementB_; /// Layout of multiplicand B using LayoutB = LayoutB_; /// Data type of accumulator matrix C using ElementC = ElementC_; /// Layout of accumulator matrix C using LayoutC = LayoutC_; /// Shape of the warp in units of thread (concept: MmaLanePolicySimt) using Policy = Policy_; /// Underlying matrix multiply operator (concept: arch::Mma) using ArchMmaOperator = typename Policy::Operator; /// Indicates math operator using MathOperator = typename ArchMmaOperator::Operator; /// Architecture tag from underlying instruction using ArchTag = typename ArchMmaOperator::ArchTag; /// Indicates class of matrix operator using OperatorClass = arch::OpClassTensorOp; /// Shape of underlying instruction using InstructionShape = typename ArchMmaOperator::Shape; /// Complex transform on A operand static ComplexTransform const kTransformA = ComplexTransform::kNone; /// Complex transform on B operand static ComplexTransform const kTransformB = ComplexTransform::kNone; /// Number of threads participating in warp-level matrix product static int const kThreadCount = 32; /// Number of partitions along K dimension static int const kPartitionsK = PartitionsK_; #if defined(__CUDA_ARCH__) && ((__CUDA_ARCH__ < 800) || (__CUDA_ARCH__ == 890)) static int const kVerticalVisit = true; #else static int const kVerticalVisit = false; #endif public: /// Iterates over the A operand in memory using IteratorA = MmaTensorOpMultiplicandTileIterator< MatrixShape<Shape::kM, Shape::kK>, Operand::kA, ElementA, LayoutA, MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>, Policy::OpDelta::kRow, kThreadCount, kPartitionsK>; /// Storage for A tile using FragmentA = typename IteratorA::Fragment; /// Storage for transformed A tile using TransformedFragmentA = Array<typename ArchMmaOperator::ElementA, FragmentA::kElements>; /// Iterates over the B operand in memory using IteratorB = MmaTensorOpMultiplicandTileIterator< MatrixShape<Shape::kK, Shape::kN>, Operand::kB, ElementB, LayoutB, MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>, Policy::OpDelta::kRow, kThreadCount, kPartitionsK>; /// Storage for B tile using FragmentB = typename IteratorB::Fragment; /// Storage for transformed B tile using TransformedFragmentB = Array<typename ArchMmaOperator::ElementB, FragmentB::kElements>; /// Iterates over the C operand in memory using IteratorC = MmaTensorOpAccumulatorTileIterator< MatrixShape<Shape::kM, Shape::kN>, ElementC, LayoutC, typename ArchMmaOperator::Shape, typename Policy::OpDelta>; /// Storage for C tile using FragmentC = typename IteratorC::Fragment; /// Number of mma operations performed using MmaIterations = MatrixShape< (Shape::kM + ArchMmaOperator::Shape::kM - 1) / ArchMmaOperator::Shape::kM, (Shape::kN + ArchMmaOperator::Shape::kN - 1) / ArchMmaOperator::Shape::kN >; public: /// Underlying matrix multiply operator (concept: arch::Mma) ArchMmaOperator mma; public: // // Methods // /// Ctor CUTLASS_DEVICE MmaTensorOp() {} /// Performs a warp-level matrix multiply-accumulate operation CUTLASS_DEVICE void operator()( FragmentC &D, TransformedFragmentA const &A, TransformedFragmentB const &B, FragmentC const &C ) const { using 
MmaOperandA = typename ArchMmaOperator::FragmentA; using MmaOperandB = typename ArchMmaOperator::FragmentB; using MmaOperandC = typename ArchMmaOperator::FragmentC; D = C; MmaOperandA const *ptr_A = reinterpret_cast<MmaOperandA const *>(&A); MmaOperandB const *ptr_B = reinterpret_cast<MmaOperandB const *>(&B); MmaOperandC *ptr_D = reinterpret_cast<MmaOperandC *>(&D); if (kVerticalVisit) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < MmaIterations::kColumn; ++n) { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < MmaIterations::kRow; ++m) { int m_serpentine = ((n % 2) ? (MmaIterations::kRow - 1 - m) : m); if (AccumulatorsInRowMajor) { // matrix B is reordered mma( ptr_D[n + m_serpentine * MmaIterations::kColumn], ptr_A[m_serpentine], ptr_B[n], ptr_D[n + m_serpentine * MmaIterations::kColumn]); } else { mma( ptr_D[m_serpentine + n * MmaIterations::kRow], ptr_A[m_serpentine], ptr_B[n], ptr_D[m_serpentine + n * MmaIterations::kRow]); } } } } else { CUTLASS_PRAGMA_UNROLL for (int m = 0; m < MmaIterations::kRow; ++m) { CUTLASS_PRAGMA_UNROLL for (int n = 0; n < MmaIterations::kColumn; ++n) { int n_serpentine = ((m % 2) ? (MmaIterations::kColumn - 1 - n) : n); if (AccumulatorsInRowMajor) { // matrix B is reordered mma( ptr_D[n_serpentine + m * MmaIterations::kColumn], ptr_A[m], ptr_B[n_serpentine], ptr_D[n_serpentine + m * MmaIterations::kColumn]); } else { mma(ptr_D[m + n_serpentine * MmaIterations::kRow], ptr_A[m], ptr_B[n_serpentine], ptr_D[m + n_serpentine * MmaIterations::kRow]); } } } } } /// Transform the mma operands to the required types CUTLASS_DEVICE void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B, FragmentA const &A, FragmentB const &B) const { // // Define conversions from source type to instruction type // FloatRoundStyle const kRoundA = PreferredRoundingMode<typename ArchMmaOperator::ElementA, ElementA>::kRound; FloatRoundStyle const kRoundB = PreferredRoundingMode<typename ArchMmaOperator::ElementB, ElementB>::kRound; if (kVerticalVisit) { detail::ConvertAndPack<typename ArchMmaOperator::ElementA, ElementA, FragmentA::kElements, kRoundA> convert_A; NumericArrayConverter<typename ArchMmaOperator::ElementB, ElementB, FragmentB::kElements / 2, kRoundB> convert_B; Array<ElementB, FragmentB::kElements / 2> const *ptr_B = reinterpret_cast<Array<ElementB, FragmentB::kElements / 2> const *>(&B); Array<typename ArchMmaOperator::ElementB, FragmentB::kElements / 2> * ptr_dst_B = reinterpret_cast<Array<typename ArchMmaOperator::ElementB, FragmentB::kElements / 2> *>(&dst_B); dst_A = convert_A(A); ptr_dst_B[0] = convert_B(ptr_B[0]); ptr_dst_B[1] = convert_B(ptr_B[1]); } else { detail::ConvertAndPack<typename ArchMmaOperator::ElementA, ElementA, FragmentA::kElements / 2, kRoundA> convert_A; NumericArrayConverter<typename ArchMmaOperator::ElementB, ElementB, FragmentB::kElements, kRoundB> convert_B; Array<ElementA, FragmentA::kElements / 2> const *ptr_A = reinterpret_cast<Array<ElementA, FragmentA::kElements / 2> const *>(&A); Array<typename ArchMmaOperator::ElementA, FragmentA::kElements / 2> * ptr_dst_A = reinterpret_cast<Array<typename ArchMmaOperator::ElementA, FragmentA::kElements / 2> *>(&dst_A); dst_B = convert_B(B); ptr_dst_A[0] = convert_A(ptr_A[0]); ptr_dst_A[1] = convert_A(ptr_A[1]); } } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace gemm } // namespace cutlass ///////////////////////////////////////////////////////////////////////////////////////////////// #include 
"cutlass/gemm/warp/mma_tensor_op_fast_f32.h" /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm/warp/mma_tensor_op.h/0
{ "file_path": "cutlass/include/cutlass/gemm/warp/mma_tensor_op.h", "repo_id": "cutlass", "token_count": 5215 }
33
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/coord.h" namespace cutlass { namespace gemm { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Shape of a matrix multiply-add operation template < /// Rows of matrix product int M = 1, /// Columns of matrix product int N = 1, /// Inner dimension of matrix product int K = 1 > struct GemmShape { static int const kM = M; static int const kN = N; static int const kK = K; static int const kMN = M * N; static int const kMK = M * K; static int const kKN = N * K; static int const kMNK = M * N * K; static int const kCount = kMNK; // // Static member functions // /// Returns a Coord object CUTLASS_HOST_DEVICE static Coord<3> toCoord() { return make_Coord(kM, kN, kK); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Type alias of the transpose of a GemmShape template < /// concept: GemmShape typename Shape > using GemmShapeTranspose = GemmShape<Shape::kN, Shape::kM, Shape::kK>; //////////////////////////////////////////////////////////////////////////////////////////////////// /// GemmCoord is a structure derived from Coord<3> that specifies a location within the /// coordinate space of a GEMM problem. 
struct GemmCoord : public Coord<3, int> { /// Integer-valued index typedef int Index; /// Base type is a Coord of rank=3 typedef Coord<3, Index> Base; /// GEMM M dimension - rows of the output C matrix static int const kM = 0; /// GEMM N dimension - columns of the output C matrix static int const kN = 1; /// GEMM K dimension - inner dimension of the GEMM problem static int const kK = 2; // // Methods // /// Default ctor CUTLASS_HOST_DEVICE GemmCoord() { } /// Constructs from Coord<3> and a batch CUTLASS_HOST_DEVICE GemmCoord(Coord<3, Index> const& coord): Base(make_Coord(coord[0], coord[1], coord[2])) { } /// Helper to construct from a K, N, M, batch variables CUTLASS_HOST_DEVICE GemmCoord(Index m, Index n, Index k): Base(make_Coord(m, n, k)) { } /// Returns the GEMM M coordinate CUTLASS_HOST_DEVICE Index const& m() const { return this->at(kM); } /// Returns reference to the GEMM M coordinate CUTLASS_HOST_DEVICE Index & m() { return this->at(kM); } /// Returns the GEMM N coordinate CUTLASS_HOST_DEVICE Index const& n() const { return this->at(kN); } /// Returns reference to the GEMM N coordinate CUTLASS_HOST_DEVICE Index & n() { return this->at(kN); } /// Returns the GEMM K coordinate CUTLASS_HOST_DEVICE Index const& k() const { return this->at(kK); } /// Returns reference to the GEMM K coordinate CUTLASS_HOST_DEVICE Index & k() { return this->at(kK); } /// Obtains a Coord<3> from GemmCoord CUTLASS_HOST_DEVICE Coord<3> mnk() const { return make_Coord(m(), n(), k()); } /// Obtains a Coord<3> from GemmCoord CUTLASS_HOST_DEVICE Coord<3> knm() const { return make_Coord(k(), n(), m()); } /// Obtains a Coord<2> from GemmCoord CUTLASS_HOST_DEVICE Coord<2> nm() const { return make_Coord(n(), m()); } /// Obtains a Coord<2> from GemmCoord CUTLASS_HOST_DEVICE Coord<2> mn() const { return make_Coord(m(), n()); } /// Obtains a Coord<2> from GemmCoord CUTLASS_HOST_DEVICE Coord<2> mk() const { return make_Coord(m(), k()); } /// Obtains a Coord<2> from GemmCoord CUTLASS_HOST_DEVICE Coord<2> km() const { return make_Coord(k(), m()); } /// Obtains a Coord<2> from GemmCoord CUTLASS_HOST_DEVICE Coord<2> nk() const { return make_Coord(n(), k()); } /// Obtains a Coord<2> from GemmCoord CUTLASS_HOST_DEVICE Coord<2> kn() const { return make_Coord(k(), n()); } // // Coord operators // /// Element-wise addition CUTLASS_HOST_DEVICE GemmCoord operator+(Base const& b) const { return GemmCoord(Base::operator+(b)); } /// Element-wise subtraction CUTLASS_HOST_DEVICE GemmCoord operator-(Base const& b) const { return GemmCoord(Base::operator-(b)); } /// Element-wise multiplication CUTLASS_HOST_DEVICE GemmCoord operator*(Base const& b) const { return GemmCoord(Base::operator*(b)); } /// Element-wise division CUTLASS_HOST_DEVICE GemmCoord operator/(Base const& b) const { return GemmCoord(Base::operator/(b)); } /// In-place addition CUTLASS_HOST_DEVICE GemmCoord& operator+=(Base const& b) { Base::operator+=(b); return *this; } /// In-place subtraction CUTLASS_HOST_DEVICE GemmCoord& operator-=(Base const& b) { Base::operator-=(b); return *this; } /// In-place multiplication CUTLASS_HOST_DEVICE GemmCoord& operator*=(Base const& b) { Base::operator*=(b); return *this; } /// In-place division CUTLASS_HOST_DEVICE GemmCoord& operator/=(Base const& b) { Base::operator/=(b); return *this; } }; //////////////////////////////////////////////////////////////////////////////////////////////////// /// BatchedGemmCoord is a structure derived from Coord<4> that specifies a location within the /// coordinate space of a batched GEMM problem. 
struct BatchedGemmCoord : public Coord<4, int> { /// Integer-valued index typedef int Index; /// Base type is a Coord of rank=4 typedef Coord<4, Index> Base; /// GEMM M dimension - rows of the output C matrix static int const kM = 0; /// GEMM N dimension - columns of the output C matrix static int const kN = 1; /// GEMM K dimension - inner dimension of the GEMM problem static int const kK = 2; /// GEMM Batch dimension - inner dimension of the GEMM problem static int const kBatch = 3; // // Methods // /// Default ctor CUTLASS_HOST_DEVICE BatchedGemmCoord() { } /// Constructs from Coord<4> CUTLASS_HOST_DEVICE BatchedGemmCoord(Base const& coord): Base(coord) { } /// Helper to construct from a K, N, M, and batch variables CUTLASS_HOST_DEVICE BatchedGemmCoord(Index m, Index n, Index k, Index b): Base(make_Coord(m, n, k, b)) { } /// Returns the GEMM M coordinate CUTLASS_HOST_DEVICE Index const& m() const { return this->at(kM); } /// Returns reference to the GEMM M coordinate CUTLASS_HOST_DEVICE Index & m() { return this->at(kM); } /// Returns the GEMM N coordinate CUTLASS_HOST_DEVICE Index const& n() const { return this->at(kN); } /// Returns reference to the GEMM N coordinate CUTLASS_HOST_DEVICE Index & n() { return this->at(kN); } /// Returns the GEMM K coordinate CUTLASS_HOST_DEVICE Index const& k() const { return this->at(kK); } /// Returns reference to the GEMM K coordinate CUTLASS_HOST_DEVICE Index & k() { return this->at(kK); } /// Returns the GEMM batch coordinate CUTLASS_HOST_DEVICE Index const& batch() const { return this->at(kBatch); } /// Returns reference to the GEMM batch coordinate CUTLASS_HOST_DEVICE Index & batch() { return this->at(kBatch); } /// Obtains a GemmCoord from BatchedGemmCoord CUTLASS_HOST_DEVICE GemmCoord mnk() const { return GemmCoord(m(), n(), k()); } /// Obtains a Coord<4> from BatchedGemmCoord CUTLASS_HOST_DEVICE Coord<4> mnkb() const { return make_Coord(m(), n(), k(), batch()); } // // Coord operators // /// Element-wise addition CUTLASS_HOST_DEVICE BatchedGemmCoord operator+(Base const& b) const { return BatchedGemmCoord(Base::operator+(b)); } /// Element-wise subtraction CUTLASS_HOST_DEVICE BatchedGemmCoord operator-(Base const& b) const { return BatchedGemmCoord(Base::operator-(b)); } /// Element-wise multiplication CUTLASS_HOST_DEVICE BatchedGemmCoord operator*(Base const& b) const { return BatchedGemmCoord(Base::operator*(b)); } /// Element-wise division CUTLASS_HOST_DEVICE BatchedGemmCoord operator/(Base const& b) const { return BatchedGemmCoord(Base::operator/(b)); } /// In-place addition CUTLASS_HOST_DEVICE BatchedGemmCoord& operator+=(Base const& b) { Base::operator+=(b); return *this; } /// In-place subtraction CUTLASS_HOST_DEVICE BatchedGemmCoord& operator-=(Base const& b) { Base::operator-=(b); return *this; } /// In-place multiplication CUTLASS_HOST_DEVICE BatchedGemmCoord& operator*=(Base const& b) { Base::operator*=(b); return *this; } /// In-place division CUTLASS_HOST_DEVICE BatchedGemmCoord& operator/=(Base const& b) { Base::operator/=(b); return *this; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/gemm_coord.h/0
{ "file_path": "cutlass/include/cutlass/gemm_coord.h", "repo_id": "cutlass", "token_count": 3582 }
34
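A short host-only usage sketch of the coordinate types defined in the header above. It assumes only that the CUTLASS `include/` directory is on the compiler's include path; the asserts restate the accessor and projection definitions shown in the source.

#include <cassert>

#include "cutlass/gemm_coord.h"

int main() {
  // A GEMM problem size M=128, N=256, K=64.
  cutlass::gemm::GemmCoord problem(128, 256, 64);
  assert(problem.m() == 128 && problem.n() == 256 && problem.k() == 64);

  // 2-D projections select operand extents: A is M-by-K, B is K-by-N.
  cutlass::Coord<2> extent_A = problem.mk();
  cutlass::Coord<2> extent_B = problem.kn();
  assert(extent_A[1] == extent_B[0]);   // shared K dimension

  // Element-wise arithmetic comes from the Coord<3> base class.
  cutlass::gemm::GemmCoord tile(128, 128, 32);
  cutlass::gemm::GemmCoord remainder = problem - tile;
  assert(remainder.n() == 128 && remainder.k() == 32);

  // BatchedGemmCoord appends a batch index and projects back to a GemmCoord via mnk().
  cutlass::gemm::BatchedGemmCoord batched(128, 256, 64, /*batch=*/8);
  assert(batched.batch() == 8 && batched.mnk().n() == problem.n());

  return 0;
}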
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Matrix classes with value semantics. 
*/ #pragma once #if !defined(__CUDACC_RTC__) #include <iosfwd> #include <cmath> #endif #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/fast_math.h" #include "cutlass/layout/matrix.h" namespace cutlass { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Primary template with partial specializations to follow template <typename Element, int Rows, int Columns> struct Matrix; ///////////////////////////////////////////////////////////////////////////////////////////////// /// 1-by-2 matrix template class definition template <typename Element_> struct Matrix<Element_, 1, 2> { // // Type definitions // /// Element data type using Element = Element_; /// Number of rows in matrix static int const kRows = 1; /// Number of columns in matrix static int const kColumns = 2; /// Layout of matrix in underlying array using Layout = layout::RowMajor; /// Number of elements in matrix static int const kCount = 2; // // Data members // /// Elements of the matrix in row-major layout Array<Element, kCount> data; // // Methods // /// Constructs a zero matrix CUTLASS_HOST_DEVICE Matrix() { data.clear(); } /// Copy constructor for a 1-by-2 matrix CUTLASS_HOST_DEVICE Matrix(Matrix const &rhs) { data = rhs.data; } /// Constucts a 1-by-2 matrix from scalar elements CUTLASS_HOST_DEVICE Matrix( Element _0_0, Element _0_1 ) { data[0] = _0_0; data[1] = _0_1; } /// Constructs a matrix from a uniform element CUTLASS_HOST_DEVICE static Matrix uniform(Element s) { Matrix m; m.data[0] = s; m.data[1] = s; return m; } /// Constructs a matrix from a uniform element 1 CUTLASS_HOST_DEVICE static Matrix ones() { return uniform(Element(1)); } /// Constructs a matrix from a uniform element 0 CUTLASS_HOST_DEVICE static Matrix zero() { return Matrix(); } /// Returns a transposed matrix CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> transpose() const { Matrix<Element, 2, 1> mt; mt.data[0] = data[0]; mt.data[1] = data[1]; return mt; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(int i, int j) const { return data[i * 1 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(int i, int j) { return data[i * 1 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element &at(int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element at(int offset) const { return data[offset]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element operator[](Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & operator[](Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element & operator[](int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element operator[](int offset) const { return data[offset]; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const { Matrix<Element, 1, 2> m; m.data[0] = data[i * 2 + j + 0]; m.data[1] = data[i * 2 + j + 1]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & 
set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) { data[i * 2 + j + 0] = m.data[0]; data[i * 2 + j + 1] = m.data[1]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 1, 2> row(int i) const { return slice_1x2(i, 0); } CUTLASS_HOST_DEVICE Matrix &set_row(Matrix<Element, 1, 2> const &v, int i = 0) { return set_slice_1x2(v, i, 0); } /// Forms a 1-by-2 matrix by horizontally concatenating an Element with an Element CUTLASS_HOST_DEVICE static Matrix hcat(Element lhs, Element rhs) { return Matrix( lhs, rhs); } /// Concatenates this matrix with a an Element to form a 1-by-3 matrix CUTLASS_HOST_DEVICE Matrix<Element, 1, 3> hcat(Element rhs) const { return Matrix<Element, 1, 3>::hcat(*this, rhs); } /// Concatenates this matrix with a a 1-by-2 matrix to form a 1-by-4 matrix CUTLASS_HOST_DEVICE Matrix<Element, 1, 4> hcat(Matrix<Element, 1, 2> const & rhs) const { return Matrix<Element, 1, 4>::hcat(*this, rhs); } /// Concatenates this matrix with a a 1-by-2 matrix to form a 2-by-2 matrix CUTLASS_HOST_DEVICE Matrix<Element, 2, 2> vcat(Matrix<Element, 1, 2> const & rhs) const { return Matrix<Element, 2, 2>::vcat(*this, rhs); } /// Concatenates this matrix with a a 2-by-2 matrix to form a 3-by-2 matrix CUTLASS_HOST_DEVICE Matrix<Element, 3, 2> vcat(Matrix<Element, 2, 2> const & rhs) const { return Matrix<Element, 3, 2>::vcat(*this, rhs); } /// Concatenates this matrix with a a 3-by-2 matrix to form a 4-by-2 matrix CUTLASS_HOST_DEVICE Matrix<Element, 4, 2> vcat(Matrix<Element, 3, 2> const & rhs) const { return Matrix<Element, 4, 2>::vcat(*this, rhs); } /// Elementwise add operator (1-by-2) CUTLASS_HOST_DEVICE Matrix add(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] + rhs.data[0]; result.data[1] = data[1] + rhs.data[1]; return result; } /// Elementwise add operator (1-by-2) CUTLASS_HOST_DEVICE Matrix operator +(Matrix const &rhs) const { return add(rhs); } /// Elementwise add operator (1-by-2) CUTLASS_HOST_DEVICE Matrix & operator +=(Matrix const &rhs) { data[0] += rhs.data[0]; data[1] += rhs.data[1]; return *this; } /// Elementwise subtract operator (1-by-2) CUTLASS_HOST_DEVICE Matrix subtract(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] - rhs.data[0]; result.data[1] = data[1] - rhs.data[1]; return result; } /// Elementwise subtract operator (1-by-2) CUTLASS_HOST_DEVICE Matrix operator -(Matrix const &rhs) const { return subtract(rhs); } /// Elementwise subtract operator (1-by-2) CUTLASS_HOST_DEVICE Matrix & operator -=(Matrix const &rhs) { data[0] -= rhs.data[0]; data[1] -= rhs.data[1]; return *this; } /// Elementwise multiply operator (1-by-2) CUTLASS_HOST_DEVICE Matrix multiply(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] * rhs.data[0]; result.data[1] = data[1] * rhs.data[1]; return result; } /// Scalar multiply operator (1-by-2) CUTLASS_HOST_DEVICE Matrix multiply(Element const &s) const { Matrix result; result.data[0] = data[0] * s; result.data[1] = data[1] * s; return result; } /// Scalar multiply operator (1-by-2) CUTLASS_HOST_DEVICE Matrix operator *(Element const &s) const { return multiply(s); } /// Scalar multiply operator (1-by-2) CUTLASS_HOST_DEVICE Matrix & operator *=(Element const &s) { data[0] *= s; data[1] *= s; return *this; } /// Elementwise divide operator (1-by-2) CUTLASS_HOST_DEVICE Matrix divide(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] / rhs.data[0]; result.data[1] = data[1] / rhs.data[1]; return result; } /// Scalar divide operator (1-by-2) CUTLASS_HOST_DEVICE Matrix 
divide(Element const &s) const { Matrix result; result.data[0] = data[0] / s; result.data[1] = data[1] / s; return result; } /// Scalar divide operator (1-by-2) CUTLASS_HOST_DEVICE Matrix operator /(Element const &s) const { return divide(s); } /// Scalar divide operator (1-by-2) CUTLASS_HOST_DEVICE Matrix & operator /=(Element const &s) { data[0] /= s; data[1] /= s; return *this; } /// Elementwise divide operator (1-by-2) CUTLASS_HOST_DEVICE Matrix operator /(Matrix const &rhs) const { return divide(rhs); } /// Elementwise divide operator (1-by-2) CUTLASS_HOST_DEVICE Matrix & operator /=(Matrix const &rhs) { data[0] /= rhs.data[0]; data[1] /= rhs.data[1]; return *this; } /// Negates each element of the matrix CUTLASS_HOST_DEVICE Matrix operator-() const { Matrix m; m.data[0] = -m.data[0]; m.data[1] = -m.data[1]; return m; } /// Matrix product of size 1-by-1-by-2 CUTLASS_HOST_DEVICE Element product(Matrix<Element, 2, 1> const &rhs, Element accum = Element()) const { // k=0 accum += data[0] * rhs.data[0]; // k=1 accum += data[1] * rhs.data[1]; return accum; } /// Matrix product of size 1-by-1-by-2 CUTLASS_HOST_DEVICE Element operator*(Matrix<Element, 2, 1> const &rhs) const { return product(rhs); } /// Matrix product of size 1-by-2-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 1, 2> product( Matrix<Element, 2, 2> const &rhs, Matrix<Element, 1, 2> accum = Matrix<Element, 1, 2>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; // k=1 accum.data[0] += data[1] * rhs.data[2]; accum.data[1] += data[1] * rhs.data[3]; return accum; } /// Matrix product of size 1-by-2-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 1, 2> operator*(Matrix<Element, 2, 2> const &rhs) const { return product(rhs); } /// Matrix product of size 1-by-2-by-2 CUTLASS_HOST_DEVICE Matrix & operator*=(Matrix<Element, 2, 2> const &rhs) { *this = product(rhs); return *this; } /// Matrix product of size 1-by-3-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 1, 3> product( Matrix<Element, 2, 3> const &rhs, Matrix<Element, 1, 3> accum = Matrix<Element, 1, 3>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; // k=1 accum.data[0] += data[1] * rhs.data[3]; accum.data[1] += data[1] * rhs.data[4]; accum.data[2] += data[1] * rhs.data[5]; return accum; } /// Matrix product of size 1-by-3-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 1, 3> operator*(Matrix<Element, 2, 3> const &rhs) const { return product(rhs); } /// Matrix product of size 1-by-4-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 1, 4> product( Matrix<Element, 2, 4> const &rhs, Matrix<Element, 1, 4> accum = Matrix<Element, 1, 4>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[0] * rhs.data[3]; // k=1 accum.data[0] += data[1] * rhs.data[4]; accum.data[1] += data[1] * rhs.data[5]; accum.data[2] += data[1] * rhs.data[6]; accum.data[3] += data[1] * rhs.data[7]; return accum; } /// Matrix product of size 1-by-4-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 1, 4> operator*(Matrix<Element, 2, 4> const &rhs) const { return product(rhs); } /// Dot product of vectors with extent 2 CUTLASS_HOST_DEVICE Element dot(Matrix<Element, 2, 1> const &rhs, Element accum = Element()) const { accum += data[0] * rhs.data[0]; accum += data[1] * rhs.data[1]; return accum; } /// Dot product of vectors with extent 2 CUTLASS_HOST_DEVICE Element dot(Matrix<Element, 1, 2> const &rhs, Element accum 
= Element()) const { accum += data[0] * rhs.data[0]; accum += data[1] * rhs.data[1]; return accum; } /// Returns the sum of elements CUTLASS_HOST_DEVICE Element sum(Element accum = Element()) const { accum += data[0]; accum += data[1]; return accum; } /// Returns the sum of squared elements CUTLASS_HOST_DEVICE Element norm(Element accum = Element()) const { accum += data[0] * data[0]; accum += data[1] * data[1]; return accum; } /// Returns square root of the norm CUTLASS_HOST_DEVICE Element magnitude() const { return fast_sqrt(norm()); } /// Returns the sum of diagonal elements CUTLASS_HOST_DEVICE Element trace(Element accum = Element()) const { accum += data[0]; return accum; } }; /// Template alias for 1-by-2 matrix template <typename Element> using Matrix1x2 = Matrix<Element, 1, 2>; /// Free funciton to infer element type from template arguments template <typename Element> CUTLASS_HOST_DEVICE Matrix1x2<Element> make_Matrix1x2( Element _0_0, Element _0_1 ) { return Matrix1x2<Element>( _0_0, _0_1 ); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// 1-by-3 matrix template class definition template <typename Element_> struct Matrix<Element_, 1, 3> { // // Type definitions // /// Element data type using Element = Element_; /// Number of rows in matrix static int const kRows = 1; /// Number of columns in matrix static int const kColumns = 3; /// Layout of matrix in underlying array using Layout = layout::RowMajor; /// Number of elements in matrix static int const kCount = 3; // // Data members // /// Elements of the matrix in row-major layout Array<Element, kCount> data; // // Methods // /// Constructs a zero matrix CUTLASS_HOST_DEVICE Matrix() { data.clear(); } /// Copy constructor for a 1-by-3 matrix CUTLASS_HOST_DEVICE Matrix(Matrix const &rhs) { data = rhs.data; } /// Constucts a 1-by-3 matrix from scalar elements CUTLASS_HOST_DEVICE Matrix( Element _0_0, Element _0_1, Element _0_2 ) { data[0] = _0_0; data[1] = _0_1; data[2] = _0_2; } /// Constructs a matrix from a uniform element CUTLASS_HOST_DEVICE static Matrix uniform(Element s) { Matrix m; m.data[0] = s; m.data[1] = s; m.data[2] = s; return m; } /// Constructs a matrix from a uniform element 1 CUTLASS_HOST_DEVICE static Matrix ones() { return uniform(Element(1)); } /// Constructs a matrix from a uniform element 0 CUTLASS_HOST_DEVICE static Matrix zero() { return Matrix(); } /// Returns a transposed matrix CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> transpose() const { Matrix<Element, 3, 1> mt; mt.data[0] = data[0]; mt.data[1] = data[1]; mt.data[2] = data[2]; return mt; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(int i, int j) const { return data[i * 1 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(int i, int j) { return data[i * 1 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element &at(int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element at(int offset) const { return data[offset]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element operator[](Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & 
operator[](Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element & operator[](int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element operator[](int offset) const { return data[offset]; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const { Matrix<Element, 1, 2> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 1]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 1] = m.data[1]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const { Matrix<Element, 1, 3> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 1]; m.data[2] = data[i * 3 + j + 2]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 1] = m.data[1]; data[i * 3 + j + 2] = m.data[2]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 1, 3> row(int i) const { return slice_1x3(i, 0); } CUTLASS_HOST_DEVICE Matrix &set_row(Matrix<Element, 1, 3> const &v, int i = 0) { return set_slice_1x3(v, i, 0); } /// Forms a 1-by-3 matrix by horizontally concatenating an Element with a 1-by-2 matrix CUTLASS_HOST_DEVICE static Matrix hcat(Element lhs, Matrix<Element, 1, 2> const & rhs) { return Matrix( lhs, rhs.at(0, 0), rhs.at(0, 1)); } /// Forms a 1-by-3 matrix by horizontally concatenating a 1-by-2 matrix with an Element CUTLASS_HOST_DEVICE static Matrix hcat(Matrix<Element, 1, 2> const & lhs, Element rhs) { return Matrix( lhs.at(0, 0), lhs.at(0, 1), rhs); } /// Concatenates this matrix with a an Element to form a 1-by-4 matrix CUTLASS_HOST_DEVICE Matrix<Element, 1, 4> hcat(Element rhs) const { return Matrix<Element, 1, 4>::hcat(*this, rhs); } /// Concatenates this matrix with a a 1-by-3 matrix to form a 2-by-3 matrix CUTLASS_HOST_DEVICE Matrix<Element, 2, 3> vcat(Matrix<Element, 1, 3> const & rhs) const { return Matrix<Element, 2, 3>::vcat(*this, rhs); } /// Concatenates this matrix with a a 2-by-3 matrix to form a 3-by-3 matrix CUTLASS_HOST_DEVICE Matrix<Element, 3, 3> vcat(Matrix<Element, 2, 3> const & rhs) const { return Matrix<Element, 3, 3>::vcat(*this, rhs); } /// Concatenates this matrix with a a 3-by-3 matrix to form a 4-by-3 matrix CUTLASS_HOST_DEVICE Matrix<Element, 4, 3> vcat(Matrix<Element, 3, 3> const & rhs) const { return Matrix<Element, 4, 3>::vcat(*this, rhs); } /// Elementwise add operator (1-by-3) CUTLASS_HOST_DEVICE Matrix add(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] + rhs.data[0]; result.data[1] = data[1] + rhs.data[1]; result.data[2] = data[2] + rhs.data[2]; return result; } /// Elementwise add operator (1-by-3) CUTLASS_HOST_DEVICE Matrix operator +(Matrix const &rhs) const { return add(rhs); } /// Elementwise add operator (1-by-3) CUTLASS_HOST_DEVICE Matrix & operator +=(Matrix const &rhs) { data[0] += rhs.data[0]; data[1] += rhs.data[1]; data[2] += rhs.data[2]; return *this; } /// Elementwise subtract operator (1-by-3) CUTLASS_HOST_DEVICE Matrix subtract(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] - rhs.data[0]; result.data[1] = data[1] - rhs.data[1]; result.data[2] = data[2] - 
rhs.data[2]; return result; } /// Elementwise subtract operator (1-by-3) CUTLASS_HOST_DEVICE Matrix operator -(Matrix const &rhs) const { return subtract(rhs); } /// Elementwise subtract operator (1-by-3) CUTLASS_HOST_DEVICE Matrix & operator -=(Matrix const &rhs) { data[0] -= rhs.data[0]; data[1] -= rhs.data[1]; data[2] -= rhs.data[2]; return *this; } /// Elementwise multiply operator (1-by-3) CUTLASS_HOST_DEVICE Matrix multiply(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] * rhs.data[0]; result.data[1] = data[1] * rhs.data[1]; result.data[2] = data[2] * rhs.data[2]; return result; } /// Scalar multiply operator (1-by-3) CUTLASS_HOST_DEVICE Matrix multiply(Element const &s) const { Matrix result; result.data[0] = data[0] * s; result.data[1] = data[1] * s; result.data[2] = data[2] * s; return result; } /// Scalar multiply operator (1-by-3) CUTLASS_HOST_DEVICE Matrix operator *(Element const &s) const { return multiply(s); } /// Scalar multiply operator (1-by-3) CUTLASS_HOST_DEVICE Matrix & operator *=(Element const &s) { data[0] *= s; data[1] *= s; data[2] *= s; return *this; } /// Elementwise divide operator (1-by-3) CUTLASS_HOST_DEVICE Matrix divide(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] / rhs.data[0]; result.data[1] = data[1] / rhs.data[1]; result.data[2] = data[2] / rhs.data[2]; return result; } /// Scalar divide operator (1-by-3) CUTLASS_HOST_DEVICE Matrix divide(Element const &s) const { Matrix result; result.data[0] = data[0] / s; result.data[1] = data[1] / s; result.data[2] = data[2] / s; return result; } /// Scalar divide operator (1-by-3) CUTLASS_HOST_DEVICE Matrix operator /(Element const &s) const { return divide(s); } /// Scalar divide operator (1-by-3) CUTLASS_HOST_DEVICE Matrix & operator /=(Element const &s) { data[0] /= s; data[1] /= s; data[2] /= s; return *this; } /// Elementwise divide operator (1-by-3) CUTLASS_HOST_DEVICE Matrix operator /(Matrix const &rhs) const { return divide(rhs); } /// Elementwise divide operator (1-by-3) CUTLASS_HOST_DEVICE Matrix & operator /=(Matrix const &rhs) { data[0] /= rhs.data[0]; data[1] /= rhs.data[1]; data[2] /= rhs.data[2]; return *this; } /// Negates each element of the matrix CUTLASS_HOST_DEVICE Matrix operator-() const { Matrix m; m.data[0] = -m.data[0]; m.data[1] = -m.data[1]; m.data[2] = -m.data[2]; return m; } /// Matrix product of size 1-by-1-by-3 CUTLASS_HOST_DEVICE Element product(Matrix<Element, 3, 1> const &rhs, Element accum = Element()) const { // k=0 accum += data[0] * rhs.data[0]; // k=1 accum += data[1] * rhs.data[1]; // k=2 accum += data[2] * rhs.data[2]; return accum; } /// Matrix product of size 1-by-1-by-3 CUTLASS_HOST_DEVICE Element operator*(Matrix<Element, 3, 1> const &rhs) const { return product(rhs); } /// Matrix product of size 1-by-2-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 1, 2> product( Matrix<Element, 3, 2> const &rhs, Matrix<Element, 1, 2> accum = Matrix<Element, 1, 2>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; // k=1 accum.data[0] += data[1] * rhs.data[2]; accum.data[1] += data[1] * rhs.data[3]; // k=2 accum.data[0] += data[2] * rhs.data[4]; accum.data[1] += data[2] * rhs.data[5]; return accum; } /// Matrix product of size 1-by-2-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 1, 2> operator*(Matrix<Element, 3, 2> const &rhs) const { return product(rhs); } /// Matrix product of size 1-by-3-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 1, 3> product( Matrix<Element, 3, 3> const &rhs, Matrix<Element, 1, 3> accum 
= Matrix<Element, 1, 3>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; // k=1 accum.data[0] += data[1] * rhs.data[3]; accum.data[1] += data[1] * rhs.data[4]; accum.data[2] += data[1] * rhs.data[5]; // k=2 accum.data[0] += data[2] * rhs.data[6]; accum.data[1] += data[2] * rhs.data[7]; accum.data[2] += data[2] * rhs.data[8]; return accum; } /// Matrix product of size 1-by-3-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 1, 3> operator*(Matrix<Element, 3, 3> const &rhs) const { return product(rhs); } /// Matrix product of size 1-by-3-by-3 CUTLASS_HOST_DEVICE Matrix & operator*=(Matrix<Element, 3, 3> const &rhs) { *this = product(rhs); return *this; } /// Matrix product of size 1-by-4-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 1, 4> product( Matrix<Element, 3, 4> const &rhs, Matrix<Element, 1, 4> accum = Matrix<Element, 1, 4>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[0] * rhs.data[3]; // k=1 accum.data[0] += data[1] * rhs.data[4]; accum.data[1] += data[1] * rhs.data[5]; accum.data[2] += data[1] * rhs.data[6]; accum.data[3] += data[1] * rhs.data[7]; // k=2 accum.data[0] += data[2] * rhs.data[8]; accum.data[1] += data[2] * rhs.data[9]; accum.data[2] += data[2] * rhs.data[10]; accum.data[3] += data[2] * rhs.data[11]; return accum; } /// Matrix product of size 1-by-4-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 1, 4> operator*(Matrix<Element, 3, 4> const &rhs) const { return product(rhs); } /// Dot product of vectors with extent 3 CUTLASS_HOST_DEVICE Element dot(Matrix<Element, 3, 1> const &rhs, Element accum = Element()) const { accum += data[0] * rhs.data[0]; accum += data[1] * rhs.data[1]; accum += data[2] * rhs.data[2]; return accum; } /// Dot product of vectors with extent 3 CUTLASS_HOST_DEVICE Element dot(Matrix<Element, 1, 3> const &rhs, Element accum = Element()) const { accum += data[0] * rhs.data[0]; accum += data[1] * rhs.data[1]; accum += data[2] * rhs.data[2]; return accum; } /// Returns the sum of elements CUTLASS_HOST_DEVICE Element sum(Element accum = Element()) const { accum += data[0]; accum += data[1]; accum += data[2]; return accum; } /// Returns the sum of squared elements CUTLASS_HOST_DEVICE Element norm(Element accum = Element()) const { accum += data[0] * data[0]; accum += data[1] * data[1]; accum += data[2] * data[2]; return accum; } /// Returns square root of the norm CUTLASS_HOST_DEVICE Element magnitude() const { return fast_sqrt(norm()); } /// Returns the sum of diagonal elements CUTLASS_HOST_DEVICE Element trace(Element accum = Element()) const { accum += data[0]; return accum; } /// Cross product CUTLASS_HOST_DEVICE Matrix cross(Matrix const &rhs) const { return Matrix( data[1] * rhs.data[2] - data[2] * rhs.data[1], data[0] * rhs.data[2] - data[2] * rhs.data[1], data[0] * rhs.data[1] - data[1] * rhs.data[0] ); } }; /// Template alias for 1-by-3 matrix template <typename Element> using Matrix1x3 = Matrix<Element, 1, 3>; /// Free funciton to infer element type from template arguments template <typename Element> CUTLASS_HOST_DEVICE Matrix1x3<Element> make_Matrix1x3( Element _0_0, Element _0_1, Element _0_2 ) { return Matrix1x3<Element>( _0_0, _0_1, _0_2 ); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// 1-by-4 matrix template class definition template <typename Element_> struct Matrix<Element_, 1, 4> { // // Type 
definitions // /// Element data type using Element = Element_; /// Number of rows in matrix static int const kRows = 1; /// Number of columns in matrix static int const kColumns = 4; /// Layout of matrix in underlying array using Layout = layout::RowMajor; /// Number of elements in matrix static int const kCount = 4; // // Data members // /// Elements of the matrix in row-major layout Array<Element, kCount> data; // // Methods // /// Constructs a zero matrix CUTLASS_HOST_DEVICE Matrix() { data.clear(); } /// Copy constructor for a 1-by-4 matrix CUTLASS_HOST_DEVICE Matrix(Matrix const &rhs) { data = rhs.data; } /// Constucts a 1-by-4 matrix from scalar elements CUTLASS_HOST_DEVICE Matrix( Element _0_0, Element _0_1, Element _0_2, Element _0_3 ) { data[0] = _0_0; data[1] = _0_1; data[2] = _0_2; data[3] = _0_3; } /// Constructs a matrix from a uniform element CUTLASS_HOST_DEVICE static Matrix uniform(Element s) { Matrix m; m.data[0] = s; m.data[1] = s; m.data[2] = s; m.data[3] = s; return m; } /// Constructs a matrix from a uniform element 1 CUTLASS_HOST_DEVICE static Matrix ones() { return uniform(Element(1)); } /// Constructs a matrix from a uniform element 0 CUTLASS_HOST_DEVICE static Matrix zero() { return Matrix(); } /// Returns a transposed matrix CUTLASS_HOST_DEVICE Matrix<Element, 4, 1> transpose() const { Matrix<Element, 4, 1> mt; mt.data[0] = data[0]; mt.data[1] = data[1]; mt.data[2] = data[2]; mt.data[3] = data[3]; return mt; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(int i, int j) const { return data[i * 1 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(int i, int j) { return data[i * 1 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element &at(int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element at(int offset) const { return data[offset]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element operator[](Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & operator[](Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element & operator[](int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element operator[](int offset) const { return data[offset]; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const { Matrix<Element, 1, 2> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const { Matrix<Element, 1, 3> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 2]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = 
m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 2] = m.data[2]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 1, 4> slice_1x4(int i = 0, int j = 0) const { Matrix<Element, 1, 4> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 2]; m.data[3] = data[i * 4 + j + 3]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x4(Matrix<Element, 1, 4> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 2] = m.data[2]; data[i * 4 + j + 3] = m.data[3]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 1, 4> row(int i) const { return slice_1x4(i, 0); } CUTLASS_HOST_DEVICE Matrix &set_row(Matrix<Element, 1, 4> const &v, int i = 0) { return set_slice_1x4(v, i, 0); } /// Forms a 1-by-4 matrix by horizontally concatenating an Element with a 1-by-3 matrix CUTLASS_HOST_DEVICE static Matrix hcat(Element lhs, Matrix<Element, 1, 3> const & rhs) { return Matrix( lhs, rhs.at(0, 0), rhs.at(0, 1), rhs.at(0, 2)); } /// Forms a 1-by-4 matrix by horizontally concatenating a 1-by-2 matrix with a 1-by-2 matrix CUTLASS_HOST_DEVICE static Matrix hcat(Matrix<Element, 1, 2> const & lhs, Matrix<Element, 1, 2> const & rhs) { return Matrix( lhs.at(0, 0), lhs.at(0, 1), rhs.at(0, 0), rhs.at(0, 1)); } /// Forms a 1-by-4 matrix by horizontally concatenating a 1-by-3 matrix with an Element CUTLASS_HOST_DEVICE static Matrix hcat(Matrix<Element, 1, 3> const & lhs, Element rhs) { return Matrix( lhs.at(0, 0), lhs.at(0, 1), lhs.at(0, 2), rhs); } /// Concatenates this matrix with a a 1-by-4 matrix to form a 2-by-4 matrix CUTLASS_HOST_DEVICE Matrix<Element, 2, 4> vcat(Matrix<Element, 1, 4> const & rhs) const { return Matrix<Element, 2, 4>::vcat(*this, rhs); } /// Concatenates this matrix with a a 2-by-4 matrix to form a 3-by-4 matrix CUTLASS_HOST_DEVICE Matrix<Element, 3, 4> vcat(Matrix<Element, 2, 4> const & rhs) const { return Matrix<Element, 3, 4>::vcat(*this, rhs); } /// Concatenates this matrix with a a 3-by-4 matrix to form a 4-by-4 matrix CUTLASS_HOST_DEVICE Matrix<Element, 4, 4> vcat(Matrix<Element, 3, 4> const & rhs) const { return Matrix<Element, 4, 4>::vcat(*this, rhs); } /// Elementwise add operator (1-by-4) CUTLASS_HOST_DEVICE Matrix add(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] + rhs.data[0]; result.data[1] = data[1] + rhs.data[1]; result.data[2] = data[2] + rhs.data[2]; result.data[3] = data[3] + rhs.data[3]; return result; } /// Elementwise add operator (1-by-4) CUTLASS_HOST_DEVICE Matrix operator +(Matrix const &rhs) const { return add(rhs); } /// Elementwise add operator (1-by-4) CUTLASS_HOST_DEVICE Matrix & operator +=(Matrix const &rhs) { data[0] += rhs.data[0]; data[1] += rhs.data[1]; data[2] += rhs.data[2]; data[3] += rhs.data[3]; return *this; } /// Elementwise subtract operator (1-by-4) CUTLASS_HOST_DEVICE Matrix subtract(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] - rhs.data[0]; result.data[1] = data[1] - rhs.data[1]; result.data[2] = data[2] - rhs.data[2]; result.data[3] = data[3] - rhs.data[3]; return result; } /// Elementwise subtract operator (1-by-4) CUTLASS_HOST_DEVICE Matrix operator -(Matrix const &rhs) const { return subtract(rhs); } /// Elementwise subtract operator (1-by-4) CUTLASS_HOST_DEVICE Matrix & operator -=(Matrix const &rhs) { data[0] -= rhs.data[0]; data[1] -= rhs.data[1]; data[2] -= rhs.data[2]; data[3] -= rhs.data[3]; return 
*this; } /// Elementwise multiply operator (1-by-4) CUTLASS_HOST_DEVICE Matrix multiply(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] * rhs.data[0]; result.data[1] = data[1] * rhs.data[1]; result.data[2] = data[2] * rhs.data[2]; result.data[3] = data[3] * rhs.data[3]; return result; } /// Scalar multiply operator (1-by-4) CUTLASS_HOST_DEVICE Matrix multiply(Element const &s) const { Matrix result; result.data[0] = data[0] * s; result.data[1] = data[1] * s; result.data[2] = data[2] * s; result.data[3] = data[3] * s; return result; } /// Scalar multiply operator (1-by-4) CUTLASS_HOST_DEVICE Matrix operator *(Element const &s) const { return multiply(s); } /// Scalar multiply operator (1-by-4) CUTLASS_HOST_DEVICE Matrix & operator *=(Element const &s) { data[0] *= s; data[1] *= s; data[2] *= s; data[3] *= s; return *this; } /// Elementwise divide operator (1-by-4) CUTLASS_HOST_DEVICE Matrix divide(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] / rhs.data[0]; result.data[1] = data[1] / rhs.data[1]; result.data[2] = data[2] / rhs.data[2]; result.data[3] = data[3] / rhs.data[3]; return result; } /// Scalar divide operator (1-by-4) CUTLASS_HOST_DEVICE Matrix divide(Element const &s) const { Matrix result; result.data[0] = data[0] / s; result.data[1] = data[1] / s; result.data[2] = data[2] / s; result.data[3] = data[3] / s; return result; } /// Scalar divide operator (1-by-4) CUTLASS_HOST_DEVICE Matrix operator /(Element const &s) const { return divide(s); } /// Scalar divide operator (1-by-4) CUTLASS_HOST_DEVICE Matrix & operator /=(Element const &s) { data[0] /= s; data[1] /= s; data[2] /= s; data[3] /= s; return *this; } /// Elementwise divide operator (1-by-4) CUTLASS_HOST_DEVICE Matrix operator /(Matrix const &rhs) const { return divide(rhs); } /// Elementwise divide operator (1-by-4) CUTLASS_HOST_DEVICE Matrix & operator /=(Matrix const &rhs) { data[0] /= rhs.data[0]; data[1] /= rhs.data[1]; data[2] /= rhs.data[2]; data[3] /= rhs.data[3]; return *this; } /// Negates each element of the matrix CUTLASS_HOST_DEVICE Matrix operator-() const { Matrix m; m.data[0] = -m.data[0]; m.data[1] = -m.data[1]; m.data[2] = -m.data[2]; m.data[3] = -m.data[3]; return m; } /// Matrix product of size 1-by-1-by-4 CUTLASS_HOST_DEVICE Element product(Matrix<Element, 4, 1> const &rhs, Element accum = Element()) const { // k=0 accum += data[0] * rhs.data[0]; // k=1 accum += data[1] * rhs.data[1]; // k=2 accum += data[2] * rhs.data[2]; // k=3 accum += data[3] * rhs.data[3]; return accum; } /// Matrix product of size 1-by-1-by-4 CUTLASS_HOST_DEVICE Element operator*(Matrix<Element, 4, 1> const &rhs) const { return product(rhs); } /// Matrix product of size 1-by-2-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 1, 2> product( Matrix<Element, 4, 2> const &rhs, Matrix<Element, 1, 2> accum = Matrix<Element, 1, 2>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; // k=1 accum.data[0] += data[1] * rhs.data[2]; accum.data[1] += data[1] * rhs.data[3]; // k=2 accum.data[0] += data[2] * rhs.data[4]; accum.data[1] += data[2] * rhs.data[5]; // k=3 accum.data[0] += data[3] * rhs.data[6]; accum.data[1] += data[3] * rhs.data[7]; return accum; } /// Matrix product of size 1-by-2-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 1, 2> operator*(Matrix<Element, 4, 2> const &rhs) const { return product(rhs); } /// Matrix product of size 1-by-3-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 1, 3> product( Matrix<Element, 4, 3> const &rhs, Matrix<Element, 1, 3> accum = 
Matrix<Element, 1, 3>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; // k=1 accum.data[0] += data[1] * rhs.data[3]; accum.data[1] += data[1] * rhs.data[4]; accum.data[2] += data[1] * rhs.data[5]; // k=2 accum.data[0] += data[2] * rhs.data[6]; accum.data[1] += data[2] * rhs.data[7]; accum.data[2] += data[2] * rhs.data[8]; // k=3 accum.data[0] += data[3] * rhs.data[9]; accum.data[1] += data[3] * rhs.data[10]; accum.data[2] += data[3] * rhs.data[11]; return accum; } /// Matrix product of size 1-by-3-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 1, 3> operator*(Matrix<Element, 4, 3> const &rhs) const { return product(rhs); } /// Matrix product of size 1-by-4-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 1, 4> product( Matrix<Element, 4, 4> const &rhs, Matrix<Element, 1, 4> accum = Matrix<Element, 1, 4>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[0] * rhs.data[3]; // k=1 accum.data[0] += data[1] * rhs.data[4]; accum.data[1] += data[1] * rhs.data[5]; accum.data[2] += data[1] * rhs.data[6]; accum.data[3] += data[1] * rhs.data[7]; // k=2 accum.data[0] += data[2] * rhs.data[8]; accum.data[1] += data[2] * rhs.data[9]; accum.data[2] += data[2] * rhs.data[10]; accum.data[3] += data[2] * rhs.data[11]; // k=3 accum.data[0] += data[3] * rhs.data[12]; accum.data[1] += data[3] * rhs.data[13]; accum.data[2] += data[3] * rhs.data[14]; accum.data[3] += data[3] * rhs.data[15]; return accum; } /// Matrix product of size 1-by-4-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 1, 4> operator*(Matrix<Element, 4, 4> const &rhs) const { return product(rhs); } /// Matrix product of size 1-by-4-by-4 CUTLASS_HOST_DEVICE Matrix & operator*=(Matrix<Element, 4, 4> const &rhs) { *this = product(rhs); return *this; } /// Dot product of vectors with extent 4 CUTLASS_HOST_DEVICE Element dot(Matrix<Element, 4, 1> const &rhs, Element accum = Element()) const { accum += data[0] * rhs.data[0]; accum += data[1] * rhs.data[1]; accum += data[2] * rhs.data[2]; accum += data[3] * rhs.data[3]; return accum; } /// Dot product of vectors with extent 4 CUTLASS_HOST_DEVICE Element dot(Matrix<Element, 1, 4> const &rhs, Element accum = Element()) const { accum += data[0] * rhs.data[0]; accum += data[1] * rhs.data[1]; accum += data[2] * rhs.data[2]; accum += data[3] * rhs.data[3]; return accum; } /// Returns the sum of elements CUTLASS_HOST_DEVICE Element sum(Element accum = Element()) const { accum += data[0]; accum += data[1]; accum += data[2]; accum += data[3]; return accum; } /// Returns the sum of squared elements CUTLASS_HOST_DEVICE Element norm(Element accum = Element()) const { accum += data[0] * data[0]; accum += data[1] * data[1]; accum += data[2] * data[2]; accum += data[3] * data[3]; return accum; } /// Returns square root of the norm CUTLASS_HOST_DEVICE Element magnitude() const { return fast_sqrt(norm()); } /// Returns the sum of diagonal elements CUTLASS_HOST_DEVICE Element trace(Element accum = Element()) const { accum += data[0]; return accum; } }; /// Template alias for 1-by-4 matrix template <typename Element> using Matrix1x4 = Matrix<Element, 1, 4>; /// Free funciton to infer element type from template arguments template <typename Element> CUTLASS_HOST_DEVICE Matrix1x4<Element> make_Matrix1x4( Element _0_0, Element _0_1, Element _0_2, Element _0_3 ) { return Matrix1x4<Element>( _0_0, _0_1, _0_2, _0_3 ); } 
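/////////////////////////////////////////////////////////////////////////////////////////////////

// Usage sketch for the 1-by-N specializations defined above. This is an illustrative
// example rather than part of the original header: it belongs in a separate host-side
// .cpp that includes "cutlass/matrix.h", so it is shown here in comment form.
//
//   #include <cassert>
//   #include "cutlass/matrix.h"
//
//   int main() {
//     // Row vectors built with the make_ helpers defined above.
//     cutlass::Matrix1x3<float> u = cutlass::make_Matrix1x3(1.0f, 2.0f, 3.0f);
//     cutlass::Matrix1x3<float> v = cutlass::make_Matrix1x3(4.0f, 5.0f, 6.0f);
//
//     // Elementwise arithmetic and reductions.
//     assert((u + v).sum() == 21.0f);     // 5 + 7 + 9
//     assert(u.norm() == 14.0f);          // 1 + 4 + 9
//
//     // dot() and product() against the transpose compute the same inner product,
//     // since a 1-by-3 times a 3-by-1 is a 1-by-1-by-3 GEMM returning Element.
//     assert(u.dot(v) == 32.0f);
//     assert(u.product(v.transpose()) == 32.0f);
//
//     // hcat() widens a row vector: a 1-by-3 concatenated with a scalar is a 1-by-4.
//     cutlass::Matrix1x4<float> h = u.hcat(0.0f);
//     assert(h.at(0, 3) == 0.0f);
//
//     return 0;
//   }

/////////////////////////////////////////////////////////////////////////////////////////////////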
///////////////////////////////////////////////////////////////////////////////////////////////// /// 2-by-1 matrix template class definition template <typename Element_> struct Matrix<Element_, 2, 1> { // // Type definitions // /// Element data type using Element = Element_; /// Number of rows in matrix static int const kRows = 2; /// Number of columns in matrix static int const kColumns = 1; /// Layout of matrix in underlying array using Layout = layout::RowMajor; /// Number of elements in matrix static int const kCount = 2; // // Data members // /// Elements of the matrix in row-major layout Array<Element, kCount> data; // // Methods // /// Constructs a zero matrix CUTLASS_HOST_DEVICE Matrix() { data.clear(); } /// Copy constructor for a 2-by-1 matrix CUTLASS_HOST_DEVICE Matrix(Matrix const &rhs) { data = rhs.data; } /// Constucts a 2-by-1 matrix from scalar elements CUTLASS_HOST_DEVICE Matrix( Element _0_0, Element _1_0 ) { data[0] = _0_0; data[1] = _1_0; } /// Constructs a matrix from a uniform element CUTLASS_HOST_DEVICE static Matrix uniform(Element s) { Matrix m; m.data[0] = s; m.data[1] = s; return m; } /// Constructs a matrix from a uniform element 1 CUTLASS_HOST_DEVICE static Matrix ones() { return uniform(Element(1)); } /// Constructs a matrix from a uniform element 0 CUTLASS_HOST_DEVICE static Matrix zero() { return Matrix(); } /// Returns a transposed matrix CUTLASS_HOST_DEVICE Matrix<Element, 1, 2> transpose() const { Matrix<Element, 1, 2> mt; mt.data[0] = data[0]; mt.data[1] = data[1]; return mt; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(int i, int j) const { return data[i * 2 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(int i, int j) { return data[i * 2 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element &at(int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element at(int offset) const { return data[offset]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element operator[](Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & operator[](Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element & operator[](int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element operator[](int offset) const { return data[offset]; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const { Matrix<Element, 2, 1> m; m.data[0] = data[i * 1 + j + 0]; m.data[1] = data[i * 1 + j + 1]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) { data[i * 1 + j + 0] = m.data[0]; data[i * 1 + j + 1] = m.data[1]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> column(int j) const { return slice_2x1(0, j); } CUTLASS_HOST_DEVICE Matrix &set_column(Matrix<Element, 2, 1> const &v, int j =0) { return set_slice_2x1(v, 0, j); } /// Concatenates this matrix with a a 2-by-1 matrix to form a 2-by-2 matrix CUTLASS_HOST_DEVICE Matrix<Element, 2, 2> hcat(Matrix<Element, 2, 
1> const & rhs) const { return Matrix<Element, 2, 2>::hcat(*this, rhs); } /// Concatenates this matrix with a a 2-by-2 matrix to form a 2-by-3 matrix CUTLASS_HOST_DEVICE Matrix<Element, 2, 3> hcat(Matrix<Element, 2, 2> const & rhs) const { return Matrix<Element, 2, 3>::hcat(*this, rhs); } /// Concatenates this matrix with a a 2-by-3 matrix to form a 2-by-4 matrix CUTLASS_HOST_DEVICE Matrix<Element, 2, 4> hcat(Matrix<Element, 2, 3> const & rhs) const { return Matrix<Element, 2, 4>::hcat(*this, rhs); } /// Forms a 2-by-1 matrix by vertically concatenating an Element with an Element CUTLASS_HOST_DEVICE static Matrix vcat(Element upper, Element lower) { return Matrix( upper , lower); } /// Concatenates this matrix with a an Element to form a 3-by-1 matrix CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> vcat(Element rhs) const { return Matrix<Element, 3, 1>::vcat(*this, rhs); } /// Concatenates this matrix with a a 2-by-1 matrix to form a 4-by-1 matrix CUTLASS_HOST_DEVICE Matrix<Element, 4, 1> vcat(Matrix<Element, 2, 1> const & rhs) const { return Matrix<Element, 4, 1>::vcat(*this, rhs); } /// Elementwise add operator (2-by-1) CUTLASS_HOST_DEVICE Matrix add(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] + rhs.data[0]; result.data[1] = data[1] + rhs.data[1]; return result; } /// Elementwise add operator (2-by-1) CUTLASS_HOST_DEVICE Matrix operator +(Matrix const &rhs) const { return add(rhs); } /// Elementwise add operator (2-by-1) CUTLASS_HOST_DEVICE Matrix & operator +=(Matrix const &rhs) { data[0] += rhs.data[0]; data[1] += rhs.data[1]; return *this; } /// Elementwise subtract operator (2-by-1) CUTLASS_HOST_DEVICE Matrix subtract(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] - rhs.data[0]; result.data[1] = data[1] - rhs.data[1]; return result; } /// Elementwise subtract operator (2-by-1) CUTLASS_HOST_DEVICE Matrix operator -(Matrix const &rhs) const { return subtract(rhs); } /// Elementwise subtract operator (2-by-1) CUTLASS_HOST_DEVICE Matrix & operator -=(Matrix const &rhs) { data[0] -= rhs.data[0]; data[1] -= rhs.data[1]; return *this; } /// Elementwise multiply operator (2-by-1) CUTLASS_HOST_DEVICE Matrix multiply(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] * rhs.data[0]; result.data[1] = data[1] * rhs.data[1]; return result; } /// Scalar multiply operator (2-by-1) CUTLASS_HOST_DEVICE Matrix multiply(Element const &s) const { Matrix result; result.data[0] = data[0] * s; result.data[1] = data[1] * s; return result; } /// Scalar multiply operator (2-by-1) CUTLASS_HOST_DEVICE Matrix operator *(Element const &s) const { return multiply(s); } /// Scalar multiply operator (2-by-1) CUTLASS_HOST_DEVICE Matrix & operator *=(Element const &s) { data[0] *= s; data[1] *= s; return *this; } /// Elementwise divide operator (2-by-1) CUTLASS_HOST_DEVICE Matrix divide(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] / rhs.data[0]; result.data[1] = data[1] / rhs.data[1]; return result; } /// Scalar divide operator (2-by-1) CUTLASS_HOST_DEVICE Matrix divide(Element const &s) const { Matrix result; result.data[0] = data[0] / s; result.data[1] = data[1] / s; return result; } /// Scalar divide operator (2-by-1) CUTLASS_HOST_DEVICE Matrix operator /(Element const &s) const { return divide(s); } /// Scalar divide operator (2-by-1) CUTLASS_HOST_DEVICE Matrix & operator /=(Element const &s) { data[0] /= s; data[1] /= s; return *this; } /// Elementwise divide operator (2-by-1) CUTLASS_HOST_DEVICE Matrix operator /(Matrix const &rhs) const { 
return divide(rhs); } /// Elementwise divide operator (2-by-1) CUTLASS_HOST_DEVICE Matrix & operator /=(Matrix const &rhs) { data[0] /= rhs.data[0]; data[1] /= rhs.data[1]; return *this; } /// Negates each element of the matrix CUTLASS_HOST_DEVICE Matrix operator-() const { Matrix m; m.data[0] = -m.data[0]; m.data[1] = -m.data[1]; return m; } /// Matrix product of size 2-by-1-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> product( Matrix<Element, 1, 1> const &rhs, Matrix<Element, 2, 1> accum = Matrix<Element, 2, 1>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[1] * rhs.data[0]; return accum; } /// Matrix product of size 2-by-1-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> operator*(Matrix<Element, 1, 1> const &rhs) const { return product(rhs); } /// Matrix product of size 2-by-1-by-1 CUTLASS_HOST_DEVICE Matrix & operator*=(Matrix<Element, 1, 1> const &rhs) { *this = product(rhs); return *this; } /// Matrix product of size 2-by-2-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 2, 2> product( Matrix<Element, 1, 2> const &rhs, Matrix<Element, 2, 2> accum = Matrix<Element, 2, 2>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[1] * rhs.data[0]; accum.data[3] += data[1] * rhs.data[1]; return accum; } /// Matrix product of size 2-by-2-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 2, 2> operator*(Matrix<Element, 1, 2> const &rhs) const { return product(rhs); } /// Matrix product of size 2-by-3-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 2, 3> product( Matrix<Element, 1, 3> const &rhs, Matrix<Element, 2, 3> accum = Matrix<Element, 2, 3>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[1] * rhs.data[0]; accum.data[4] += data[1] * rhs.data[1]; accum.data[5] += data[1] * rhs.data[2]; return accum; } /// Matrix product of size 2-by-3-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 2, 3> operator*(Matrix<Element, 1, 3> const &rhs) const { return product(rhs); } /// Matrix product of size 2-by-4-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 2, 4> product( Matrix<Element, 1, 4> const &rhs, Matrix<Element, 2, 4> accum = Matrix<Element, 2, 4>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[0] * rhs.data[3]; accum.data[4] += data[1] * rhs.data[0]; accum.data[5] += data[1] * rhs.data[1]; accum.data[6] += data[1] * rhs.data[2]; accum.data[7] += data[1] * rhs.data[3]; return accum; } /// Matrix product of size 2-by-4-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 2, 4> operator*(Matrix<Element, 1, 4> const &rhs) const { return product(rhs); } /// Dot product of vectors with extent 2 CUTLASS_HOST_DEVICE Element dot(Matrix<Element, 2, 1> const &rhs, Element accum = Element()) const { accum += data[0] * rhs.data[0]; accum += data[1] * rhs.data[1]; return accum; } /// Dot product of vectors with extent 2 CUTLASS_HOST_DEVICE Element dot(Matrix<Element, 1, 2> const &rhs, Element accum = Element()) const { accum += data[0] * rhs.data[0]; accum += data[1] * rhs.data[1]; return accum; } /// Returns the sum of elements CUTLASS_HOST_DEVICE Element sum(Element accum = Element()) const { accum += data[0]; accum += data[1]; return accum; } /// Returns the sum of squared elements CUTLASS_HOST_DEVICE Element norm(Element accum = Element()) const { accum += data[0] * data[0]; accum += data[1] * data[1]; return accum; } /// Returns 
square root of the norm CUTLASS_HOST_DEVICE Element magnitude() const { return fast_sqrt(norm()); } /// Returns the sum of diagonal elements CUTLASS_HOST_DEVICE Element trace(Element accum = Element()) const { accum += data[0]; return accum; } }; /// Template alias for 2-by-1 matrix template <typename Element> using Matrix2x1 = Matrix<Element, 2, 1>; /// Free funciton to infer element type from template arguments template <typename Element> CUTLASS_HOST_DEVICE Matrix2x1<Element> make_Matrix2x1( Element _0_0, Element _1_0 ) { return Matrix2x1<Element>( _0_0, _1_0 ); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// 2-by-2 matrix template class definition template <typename Element_> struct Matrix<Element_, 2, 2> { // // Type definitions // /// Element data type using Element = Element_; /// Number of rows in matrix static int const kRows = 2; /// Number of columns in matrix static int const kColumns = 2; /// Layout of matrix in underlying array using Layout = layout::RowMajor; /// Number of elements in matrix static int const kCount = 4; // // Data members // /// Elements of the matrix in row-major layout Array<Element, kCount> data; // // Methods // /// Constructs a zero matrix CUTLASS_HOST_DEVICE Matrix() { data.clear(); } /// Copy constructor for a 2-by-2 matrix CUTLASS_HOST_DEVICE Matrix(Matrix const &rhs) { data = rhs.data; } /// Constucts a 2-by-2 matrix from scalar elements CUTLASS_HOST_DEVICE Matrix( Element _0_0, Element _0_1, Element _1_0, Element _1_1 ) { data[0] = _0_0; data[1] = _0_1; data[2] = _1_0; data[3] = _1_1; } /// Constucts a 2-by-2 matrix from row vectors CUTLASS_HOST_DEVICE Matrix( Matrix<Element, 1, 2> const &row_0, Matrix<Element, 1, 2> const &row_1 ) { data[0] = row_0.data[0]; data[1] = row_0.data[1]; data[2] = row_1.data[0]; data[3] = row_1.data[1]; } /// Static method to construct a 2-by-2 matrix from column vectors CUTLASS_HOST_DEVICE static Matrix from_columns( Matrix<Element, 2, 1> const &column_0, Matrix<Element, 2, 1> const &column_1 ) { Matrix result; result.data[0] = column_0.data[0]; result.data[1] = column_1.data[0]; result.data[2] = column_0.data[1]; result.data[3] = column_1.data[1]; return result; } /// Constructs an identity matrix CUTLASS_HOST_DEVICE static Matrix identity() { Matrix m; m.data[0] = Element(1); m.data[3] = Element(1); return m; } /// Constructs a matrix from a uniform element CUTLASS_HOST_DEVICE static Matrix uniform(Element s) { Matrix m; m.data[0] = s; m.data[1] = s; m.data[2] = s; m.data[3] = s; return m; } /// Constructs a matrix from a uniform element 1 CUTLASS_HOST_DEVICE static Matrix ones() { return uniform(Element(1)); } /// Constructs a matrix from a uniform element 0 CUTLASS_HOST_DEVICE static Matrix zero() { return Matrix(); } /// Constructs a matrix from elements along its diagonal CUTLASS_HOST_DEVICE static Matrix from_diagonal(Matrix<Element, 2, 1> const &diag) { Matrix m; m.data[0] = diag.data[0]; m.data[3] = diag.data[1]; return m; } /// Constructs a matrix from elements along its diagonal CUTLASS_HOST_DEVICE static Matrix from_diagonal(Matrix<Element, 1, 2> const &diag) { Matrix m; m.data[0] = diag.data[0]; m.data[3] = diag.data[1]; return m; } /// Gets an array of diagonal elements CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> diagonal() const { Matrix<Element, 2, 1> diag; diag.data[0] = data[0]; diag.data[1] = data[3]; return diag; } /// Returns a transposed matrix CUTLASS_HOST_DEVICE Matrix<Element, 2, 2> transpose() const { Matrix<Element, 2, 2> mt; mt.data[0] = 
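/// Illustrative usage sketch for constructing 2-by-2 matrices (assumes float elements;
/// names are chosen for illustration only):
///
/// \code
/// cutlass::Matrix2x2<float> I = cutlass::Matrix2x2<float>::identity();
/// cutlass::Matrix2x2<float> A(1.0f, 2.0f,
///                             3.0f, 4.0f);             // row-major scalar constructor
/// cutlass::Matrix2x1<float> d = A.diagonal();          // (1, 4)
/// \endcode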
data[0]; mt.data[2] = data[1]; mt.data[1] = data[2]; mt.data[3] = data[3]; return mt; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(int i, int j) const { return data[i * 2 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(int i, int j) { return data[i * 2 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element &at(int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element at(int offset) const { return data[offset]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element operator[](Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & operator[](Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element & operator[](int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element operator[](int offset) const { return data[offset]; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const { Matrix<Element, 1, 2> m; m.data[0] = data[i * 2 + j + 0]; m.data[1] = data[i * 2 + j + 1]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) { data[i * 2 + j + 0] = m.data[0]; data[i * 2 + j + 1] = m.data[1]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 1, 2> row(int i) const { return slice_1x2(i, 0); } CUTLASS_HOST_DEVICE Matrix &set_row(Matrix<Element, 1, 2> const &v, int i = 0) { return set_slice_1x2(v, i, 0); } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const { Matrix<Element, 2, 1> m; m.data[0] = data[i * 2 + j + 0]; m.data[1] = data[i * 2 + j + 2]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) { data[i * 2 + j + 0] = m.data[0]; data[i * 2 + j + 2] = m.data[1]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> column(int j) const { return slice_2x1(0, j); } CUTLASS_HOST_DEVICE Matrix &set_column(Matrix<Element, 2, 1> const &v, int j =0) { return set_slice_2x1(v, 0, j); } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const { Matrix<Element, 2, 2> m; m.data[0] = data[i * 2 + j + 0]; m.data[1] = data[i * 2 + j + 1]; m.data[2] = data[i * 2 + j + 2]; m.data[3] = data[i * 2 + j + 3]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) { data[i * 2 + j + 0] = m.data[0]; data[i * 2 + j + 1] = m.data[1]; data[i * 2 + j + 2] = m.data[2]; data[i * 2 + j + 3] = m.data[3]; return *this; } /// Forms a 2-by-2 matrix by horizontally concatenating a 2-by-1 matrix with a 2-by-1 matrix CUTLASS_HOST_DEVICE static Matrix hcat(Matrix<Element, 2, 1> const & lhs, Matrix<Element, 2, 1> const & rhs) { return Matrix( lhs.at(0, 0), rhs.at(0, 0) , lhs.at(1, 0), rhs.at(1, 0)); } /// Concatenates this matrix with a a 2-by-1 matrix to form a 2-by-3 
matrix CUTLASS_HOST_DEVICE Matrix<Element, 2, 3> hcat(Matrix<Element, 2, 1> const & rhs) const { return Matrix<Element, 2, 3>::hcat(*this, rhs); } /// Concatenates this matrix with a a 2-by-2 matrix to form a 2-by-4 matrix CUTLASS_HOST_DEVICE Matrix<Element, 2, 4> hcat(Matrix<Element, 2, 2> const & rhs) const { return Matrix<Element, 2, 4>::hcat(*this, rhs); } /// Forms a 2-by-2 matrix by vertically concatenating a 1-by-2 matrix with a 1-by-2 matrix CUTLASS_HOST_DEVICE static Matrix vcat(Matrix<Element, 1, 2> const & upper, Matrix<Element, 1, 2> const & lower) { return Matrix( upper.at(0, 0), upper.at(0, 1) , lower.at(0, 0), lower.at(0, 1)); } /// Concatenates this matrix with a a 1-by-2 matrix to form a 3-by-2 matrix CUTLASS_HOST_DEVICE Matrix<Element, 3, 2> vcat(Matrix<Element, 1, 2> const & rhs) const { return Matrix<Element, 3, 2>::vcat(*this, rhs); } /// Concatenates this matrix with a a 2-by-2 matrix to form a 4-by-2 matrix CUTLASS_HOST_DEVICE Matrix<Element, 4, 2> vcat(Matrix<Element, 2, 2> const & rhs) const { return Matrix<Element, 4, 2>::vcat(*this, rhs); } /// Forms a 2-by-2 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Element A, Element B, Element C, Element D) { return Matrix( A, B , C, D ); } /// Elementwise add operator (2-by-2) CUTLASS_HOST_DEVICE Matrix add(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] + rhs.data[0]; result.data[1] = data[1] + rhs.data[1]; result.data[2] = data[2] + rhs.data[2]; result.data[3] = data[3] + rhs.data[3]; return result; } /// Elementwise add operator (2-by-2) CUTLASS_HOST_DEVICE Matrix operator +(Matrix const &rhs) const { return add(rhs); } /// Elementwise add operator (2-by-2) CUTLASS_HOST_DEVICE Matrix & operator +=(Matrix const &rhs) { data[0] += rhs.data[0]; data[1] += rhs.data[1]; data[2] += rhs.data[2]; data[3] += rhs.data[3]; return *this; } /// Elementwise subtract operator (2-by-2) CUTLASS_HOST_DEVICE Matrix subtract(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] - rhs.data[0]; result.data[1] = data[1] - rhs.data[1]; result.data[2] = data[2] - rhs.data[2]; result.data[3] = data[3] - rhs.data[3]; return result; } /// Elementwise subtract operator (2-by-2) CUTLASS_HOST_DEVICE Matrix operator -(Matrix const &rhs) const { return subtract(rhs); } /// Elementwise subtract operator (2-by-2) CUTLASS_HOST_DEVICE Matrix & operator -=(Matrix const &rhs) { data[0] -= rhs.data[0]; data[1] -= rhs.data[1]; data[2] -= rhs.data[2]; data[3] -= rhs.data[3]; return *this; } /// Elementwise multiply operator (2-by-2) CUTLASS_HOST_DEVICE Matrix multiply(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] * rhs.data[0]; result.data[1] = data[1] * rhs.data[1]; result.data[2] = data[2] * rhs.data[2]; result.data[3] = data[3] * rhs.data[3]; return result; } /// Scalar multiply operator (2-by-2) CUTLASS_HOST_DEVICE Matrix multiply(Element const &s) const { Matrix result; result.data[0] = data[0] * s; result.data[1] = data[1] * s; result.data[2] = data[2] * s; result.data[3] = data[3] * s; return result; } /// Scalar multiply operator (2-by-2) CUTLASS_HOST_DEVICE Matrix operator *(Element const &s) const { return multiply(s); } /// Scalar multiply operator (2-by-2) CUTLASS_HOST_DEVICE Matrix & operator *=(Element const &s) { data[0] *= s; data[1] *= s; data[2] *= s; data[3] *= s; return *this; } /// Elementwise divide operator (2-by-2) CUTLASS_HOST_DEVICE Matrix divide(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] / rhs.data[0]; result.data[1] = 
data[1] / rhs.data[1]; result.data[2] = data[2] / rhs.data[2]; result.data[3] = data[3] / rhs.data[3]; return result; } /// Scalar divide operator (2-by-2) CUTLASS_HOST_DEVICE Matrix divide(Element const &s) const { Matrix result; result.data[0] = data[0] / s; result.data[1] = data[1] / s; result.data[2] = data[2] / s; result.data[3] = data[3] / s; return result; } /// Scalar divide operator (2-by-2) CUTLASS_HOST_DEVICE Matrix operator /(Element const &s) const { return divide(s); } /// Scalar divide operator (2-by-2) CUTLASS_HOST_DEVICE Matrix & operator /=(Element const &s) { data[0] /= s; data[1] /= s; data[2] /= s; data[3] /= s; return *this; } /// Elementwise divide operator (2-by-2) CUTLASS_HOST_DEVICE Matrix operator /(Matrix const &rhs) const { return divide(rhs); } /// Elementwise divide operator (2-by-2) CUTLASS_HOST_DEVICE Matrix & operator /=(Matrix const &rhs) { data[0] /= rhs.data[0]; data[1] /= rhs.data[1]; data[2] /= rhs.data[2]; data[3] /= rhs.data[3]; return *this; } /// Negates each element of the matrix CUTLASS_HOST_DEVICE Matrix operator-() const { Matrix m; m.data[0] = -m.data[0]; m.data[1] = -m.data[1]; m.data[2] = -m.data[2]; m.data[3] = -m.data[3]; return m; } /// Matrix product of size 2-by-1-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> product( Matrix<Element, 2, 1> const &rhs, Matrix<Element, 2, 1> accum = Matrix<Element, 2, 1>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[2] * rhs.data[0]; // k=1 accum.data[0] += data[1] * rhs.data[1]; accum.data[1] += data[3] * rhs.data[1]; return accum; } /// Matrix product of size 2-by-1-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> operator*(Matrix<Element, 2, 1> const &rhs) const { return product(rhs); } /// Matrix product of size 2-by-2-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 2, 2> product( Matrix<Element, 2, 2> const &rhs, Matrix<Element, 2, 2> accum = Matrix<Element, 2, 2>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[2] * rhs.data[0]; accum.data[3] += data[2] * rhs.data[1]; // k=1 accum.data[0] += data[1] * rhs.data[2]; accum.data[1] += data[1] * rhs.data[3]; accum.data[2] += data[3] * rhs.data[2]; accum.data[3] += data[3] * rhs.data[3]; return accum; } /// Matrix product of size 2-by-2-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 2, 2> operator*(Matrix<Element, 2, 2> const &rhs) const { return product(rhs); } /// Matrix product of size 2-by-2-by-2 CUTLASS_HOST_DEVICE Matrix & operator*=(Matrix<Element, 2, 2> const &rhs) { *this = product(rhs); return *this; } /// Matrix product of size 2-by-3-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 2, 3> product( Matrix<Element, 2, 3> const &rhs, Matrix<Element, 2, 3> accum = Matrix<Element, 2, 3>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[2] * rhs.data[0]; accum.data[4] += data[2] * rhs.data[1]; accum.data[5] += data[2] * rhs.data[2]; // k=1 accum.data[0] += data[1] * rhs.data[3]; accum.data[1] += data[1] * rhs.data[4]; accum.data[2] += data[1] * rhs.data[5]; accum.data[3] += data[3] * rhs.data[3]; accum.data[4] += data[3] * rhs.data[4]; accum.data[5] += data[3] * rhs.data[5]; return accum; } /// Matrix product of size 2-by-3-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 2, 3> operator*(Matrix<Element, 2, 3> const &rhs) const { return product(rhs); } /// Matrix product of size 2-by-4-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 2, 4> product( Matrix<Element, 2, 4> 
const &rhs, Matrix<Element, 2, 4> accum = Matrix<Element, 2, 4>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[0] * rhs.data[3]; accum.data[4] += data[2] * rhs.data[0]; accum.data[5] += data[2] * rhs.data[1]; accum.data[6] += data[2] * rhs.data[2]; accum.data[7] += data[2] * rhs.data[3]; // k=1 accum.data[0] += data[1] * rhs.data[4]; accum.data[1] += data[1] * rhs.data[5]; accum.data[2] += data[1] * rhs.data[6]; accum.data[3] += data[1] * rhs.data[7]; accum.data[4] += data[3] * rhs.data[4]; accum.data[5] += data[3] * rhs.data[5]; accum.data[6] += data[3] * rhs.data[6]; accum.data[7] += data[3] * rhs.data[7]; return accum; } /// Matrix product of size 2-by-4-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 2, 4> operator*(Matrix<Element, 2, 4> const &rhs) const { return product(rhs); } /// Returns the sum of elements CUTLASS_HOST_DEVICE Element sum(Element accum = Element()) const { accum += data[0]; accum += data[1]; accum += data[2]; accum += data[3]; return accum; } /// Returns the sum of squared elements CUTLASS_HOST_DEVICE Element norm(Element accum = Element()) const { accum += data[0] * data[0]; accum += data[1] * data[1]; accum += data[2] * data[2]; accum += data[3] * data[3]; return accum; } /// Returns square root of the norm CUTLASS_HOST_DEVICE Element magnitude() const { return fast_sqrt(norm()); } /// Returns the sum of diagonal elements CUTLASS_HOST_DEVICE Element trace(Element accum = Element()) const { accum += data[0]; accum += data[3]; return accum; } /// Returns 2-by-2 rotation matrix CUTLASS_HOST_DEVICE static Matrix rotation(Element theta) { Element c = fast_cos(theta); Element s = fast_sin(theta); return Matrix( c, -s, s, c ); } /// Computes the determinant of a 2-by-2 matrix CUTLASS_HOST_DEVICE Element determinant(Element accum = Element()) const { accum += data[0] * data[3] - data[1] * data[2]; return accum; } /// Computes the inverse of a 2-by-2 matrix given /// the matrix's determinant CUTLASS_HOST_DEVICE Matrix inverse(Element det) const { return Matrix( data[3], -data[1], -data[2], data[0] ) * (Element(1) / det); } /// Computes the inverse of a 2-by-2 matrix. 
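/// Illustrative usage sketch for the 2-by-2 numeric helpers above (rotation,
/// determinant, inverse, matrix-vector product). Assumes float elements; names are
/// chosen for illustration only:
///
/// \code
/// cutlass::Matrix2x2<float> R = cutlass::Matrix2x2<float>::rotation(0.5f);
/// float det = R.determinant();                    // ~1 for a rotation matrix
/// cutlass::Matrix2x2<float> Rinv = R.inverse();   // equals R.transpose() for a rotation
/// cutlass::Matrix2x1<float> x(1.0f, 0.0f);
/// cutlass::Matrix2x1<float> y = R * x;            // rotate x by 0.5 radians
/// \endcode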
CUTLASS_HOST_DEVICE Matrix inverse() const { return inverse(determinant()); } }; /// Template alias for 2-by-2 matrix template <typename Element> using Matrix2x2 = Matrix<Element, 2, 2>; /// Free funciton to infer element type from template arguments template <typename Element> CUTLASS_HOST_DEVICE Matrix2x2<Element> make_Matrix2x2( Element _0_0, Element _0_1, Element _1_0, Element _1_1 ) { return Matrix2x2<Element>( _0_0, _0_1, _1_0, _1_1 ); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// 2-by-3 matrix template class definition template <typename Element_> struct Matrix<Element_, 2, 3> { // // Type definitions // /// Element data type using Element = Element_; /// Number of rows in matrix static int const kRows = 2; /// Number of columns in matrix static int const kColumns = 3; /// Layout of matrix in underlying array using Layout = layout::RowMajor; /// Number of elements in matrix static int const kCount = 6; // // Data members // /// Elements of the matrix in row-major layout Array<Element, kCount> data; // // Methods // /// Constructs a zero matrix CUTLASS_HOST_DEVICE Matrix() { data.clear(); } /// Copy constructor for a 2-by-3 matrix CUTLASS_HOST_DEVICE Matrix(Matrix const &rhs) { data = rhs.data; } /// Constucts a 2-by-3 matrix from scalar elements CUTLASS_HOST_DEVICE Matrix( Element _0_0, Element _0_1, Element _0_2, Element _1_0, Element _1_1, Element _1_2 ) { data[0] = _0_0; data[1] = _0_1; data[2] = _0_2; data[3] = _1_0; data[4] = _1_1; data[5] = _1_2; } /// Constucts a 2-by-3 matrix from row vectors CUTLASS_HOST_DEVICE Matrix( Matrix<Element, 1, 3> const &row_0, Matrix<Element, 1, 3> const &row_1 ) { data[0] = row_0.data[0]; data[1] = row_0.data[1]; data[2] = row_0.data[2]; data[3] = row_1.data[0]; data[4] = row_1.data[1]; data[5] = row_1.data[2]; } /// Static method to construct a 2-by-3 matrix from column vectors CUTLASS_HOST_DEVICE static Matrix from_columns( Matrix<Element, 3, 1> const &column_0, Matrix<Element, 3, 1> const &column_1, Matrix<Element, 3, 1> const &column_2 ) { Matrix result; result.data[0] = column_0.data[0]; result.data[1] = column_1.data[0]; result.data[2] = column_2.data[0]; result.data[3] = column_0.data[1]; result.data[4] = column_1.data[1]; result.data[5] = column_2.data[1]; return result; } /// Constructs a matrix from a uniform element CUTLASS_HOST_DEVICE static Matrix uniform(Element s) { Matrix m; m.data[0] = s; m.data[1] = s; m.data[2] = s; m.data[3] = s; m.data[4] = s; m.data[5] = s; return m; } /// Constructs a matrix from a uniform element 1 CUTLASS_HOST_DEVICE static Matrix ones() { return uniform(Element(1)); } /// Constructs a matrix from a uniform element 0 CUTLASS_HOST_DEVICE static Matrix zero() { return Matrix(); } /// Constructs a matrix from elements along its diagonal CUTLASS_HOST_DEVICE static Matrix from_diagonal(Matrix<Element, 2, 1> const &diag) { Matrix m; m.data[0] = diag.data[0]; m.data[3] = diag.data[1]; return m; } /// Constructs a matrix from elements along its diagonal CUTLASS_HOST_DEVICE static Matrix from_diagonal(Matrix<Element, 1, 2> const &diag) { Matrix m; m.data[0] = diag.data[0]; m.data[3] = diag.data[1]; return m; } /// Gets an array of diagonal elements CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> diagonal() const { Matrix<Element, 2, 1> diag; diag.data[0] = data[0]; diag.data[1] = data[3]; return diag; } /// Returns a transposed matrix CUTLASS_HOST_DEVICE Matrix<Element, 3, 2> transpose() const { Matrix<Element, 3, 2> mt; mt.data[0] = data[0]; mt.data[2] = 
data[1]; mt.data[4] = data[2]; mt.data[1] = data[3]; mt.data[3] = data[4]; mt.data[5] = data[5]; return mt; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(int i, int j) const { return data[i * 2 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(int i, int j) { return data[i * 2 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element &at(int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element at(int offset) const { return data[offset]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element operator[](Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & operator[](Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element & operator[](int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element operator[](int offset) const { return data[offset]; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const { Matrix<Element, 1, 2> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 1]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 1] = m.data[1]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const { Matrix<Element, 1, 3> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 1]; m.data[2] = data[i * 3 + j + 2]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 1] = m.data[1]; data[i * 3 + j + 2] = m.data[2]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 1, 3> row(int i) const { return slice_1x3(i, 0); } CUTLASS_HOST_DEVICE Matrix &set_row(Matrix<Element, 1, 3> const &v, int i = 0) { return set_slice_1x3(v, i, 0); } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const { Matrix<Element, 2, 1> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 3]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 3] = m.data[1]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> column(int j) const { return slice_2x1(0, j); } CUTLASS_HOST_DEVICE Matrix &set_column(Matrix<Element, 2, 1> const &v, int j =0) { return set_slice_2x1(v, 0, j); } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const { Matrix<Element, 2, 2> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 1]; m.data[2] = data[i * 3 + j + 3]; m.data[3] = data[i * 3 + j + 4]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & 
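/// Illustrative usage sketch for the 2-by-3 row, column, and slice accessors above
/// (assumes float elements; names are chosen for illustration only):
///
/// \code
/// cutlass::Matrix2x3<float> A = cutlass::Matrix2x3<float>::ones();
/// cutlass::Matrix<float, 1, 3> r0 = A.row(0);        // first row
/// cutlass::Matrix<float, 2, 1> c2 = A.column(2);     // last column
/// A.set_column(c2, 0);                               // overwrite column 0
/// \endcode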
set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 1] = m.data[1]; data[i * 3 + j + 3] = m.data[2]; data[i * 3 + j + 4] = m.data[3]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 3> slice_2x3(int i = 0, int j = 0) const { Matrix<Element, 2, 3> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 1]; m.data[2] = data[i * 3 + j + 2]; m.data[3] = data[i * 3 + j + 3]; m.data[4] = data[i * 3 + j + 4]; m.data[5] = data[i * 3 + j + 5]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x3(Matrix<Element, 2, 3> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 1] = m.data[1]; data[i * 3 + j + 2] = m.data[2]; data[i * 3 + j + 3] = m.data[3]; data[i * 3 + j + 4] = m.data[4]; data[i * 3 + j + 5] = m.data[5]; return *this; } /// Forms a 2-by-3 matrix by horizontally concatenating a 2-by-1 matrix with a 2-by-2 matrix CUTLASS_HOST_DEVICE static Matrix hcat(Matrix<Element, 2, 1> const & lhs, Matrix<Element, 2, 2> const & rhs) { return Matrix( lhs.at(0, 0), rhs.at(0, 0), rhs.at(0, 1) , lhs.at(1, 0), rhs.at(1, 0), rhs.at(1, 1)); } /// Forms a 2-by-3 matrix by horizontally concatenating a 2-by-2 matrix with a 2-by-1 matrix CUTLASS_HOST_DEVICE static Matrix hcat(Matrix<Element, 2, 2> const & lhs, Matrix<Element, 2, 1> const & rhs) { return Matrix( lhs.at(0, 0), lhs.at(0, 1), rhs.at(0, 0) , lhs.at(1, 0), lhs.at(1, 1), rhs.at(1, 0)); } /// Concatenates this matrix with a a 2-by-1 matrix to form a 2-by-4 matrix CUTLASS_HOST_DEVICE Matrix<Element, 2, 4> hcat(Matrix<Element, 2, 1> const & rhs) const { return Matrix<Element, 2, 4>::hcat(*this, rhs); } /// Forms a 2-by-3 matrix by vertically concatenating a 1-by-3 matrix with a 1-by-3 matrix CUTLASS_HOST_DEVICE static Matrix vcat(Matrix<Element, 1, 3> const & upper, Matrix<Element, 1, 3> const & lower) { return Matrix( upper.at(0, 0), upper.at(0, 1), upper.at(0, 2) , lower.at(0, 0), lower.at(0, 1), lower.at(0, 2)); } /// Concatenates this matrix with a a 1-by-3 matrix to form a 3-by-3 matrix CUTLASS_HOST_DEVICE Matrix<Element, 3, 3> vcat(Matrix<Element, 1, 3> const & rhs) const { return Matrix<Element, 3, 3>::vcat(*this, rhs); } /// Concatenates this matrix with a a 2-by-3 matrix to form a 4-by-3 matrix CUTLASS_HOST_DEVICE Matrix<Element, 4, 3> vcat(Matrix<Element, 2, 3> const & rhs) const { return Matrix<Element, 4, 3>::vcat(*this, rhs); } /// Forms a 2-by-3 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Element A, Matrix<Element, 1, 2> const & B, Element C, Matrix<Element, 1, 2> const & D) { return Matrix( A, B.at(0, 0), B.at(0, 1) , C, D.at(0, 0), D.at(0, 1) ); } /// Forms a 2-by-3 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 1, 2> const & A, Element B, Matrix<Element, 1, 2> const & C, Element D) { return Matrix( A.at(0, 0), A.at(0, 1), B , C.at(0, 0), C.at(0, 1), D ); } /// Elementwise add operator (2-by-3) CUTLASS_HOST_DEVICE Matrix add(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] + rhs.data[0]; result.data[1] = data[1] + rhs.data[1]; result.data[2] = data[2] + rhs.data[2]; result.data[3] = data[3] + rhs.data[3]; result.data[4] = data[4] + rhs.data[4]; result.data[5] = data[5] + rhs.data[5]; return result; } /// Elementwise add operator (2-by-3) CUTLASS_HOST_DEVICE Matrix operator +(Matrix const &rhs) const { return add(rhs); } /// Elementwise 
add operator (2-by-3) CUTLASS_HOST_DEVICE Matrix & operator +=(Matrix const &rhs) { data[0] += rhs.data[0]; data[1] += rhs.data[1]; data[2] += rhs.data[2]; data[3] += rhs.data[3]; data[4] += rhs.data[4]; data[5] += rhs.data[5]; return *this; } /// Elementwise subtract operator (2-by-3) CUTLASS_HOST_DEVICE Matrix subtract(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] - rhs.data[0]; result.data[1] = data[1] - rhs.data[1]; result.data[2] = data[2] - rhs.data[2]; result.data[3] = data[3] - rhs.data[3]; result.data[4] = data[4] - rhs.data[4]; result.data[5] = data[5] - rhs.data[5]; return result; } /// Elementwise subtract operator (2-by-3) CUTLASS_HOST_DEVICE Matrix operator -(Matrix const &rhs) const { return subtract(rhs); } /// Elementwise subtract operator (2-by-3) CUTLASS_HOST_DEVICE Matrix & operator -=(Matrix const &rhs) { data[0] -= rhs.data[0]; data[1] -= rhs.data[1]; data[2] -= rhs.data[2]; data[3] -= rhs.data[3]; data[4] -= rhs.data[4]; data[5] -= rhs.data[5]; return *this; } /// Elementwise multiply operator (2-by-3) CUTLASS_HOST_DEVICE Matrix multiply(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] * rhs.data[0]; result.data[1] = data[1] * rhs.data[1]; result.data[2] = data[2] * rhs.data[2]; result.data[3] = data[3] * rhs.data[3]; result.data[4] = data[4] * rhs.data[4]; result.data[5] = data[5] * rhs.data[5]; return result; } /// Scalar multiply operator (2-by-3) CUTLASS_HOST_DEVICE Matrix multiply(Element const &s) const { Matrix result; result.data[0] = data[0] * s; result.data[1] = data[1] * s; result.data[2] = data[2] * s; result.data[3] = data[3] * s; result.data[4] = data[4] * s; result.data[5] = data[5] * s; return result; } /// Scalar multiply operator (2-by-3) CUTLASS_HOST_DEVICE Matrix operator *(Element const &s) const { return multiply(s); } /// Scalar multiply operator (2-by-3) CUTLASS_HOST_DEVICE Matrix & operator *=(Element const &s) { data[0] *= s; data[1] *= s; data[2] *= s; data[3] *= s; data[4] *= s; data[5] *= s; return *this; } /// Elementwise divide operator (2-by-3) CUTLASS_HOST_DEVICE Matrix divide(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] / rhs.data[0]; result.data[1] = data[1] / rhs.data[1]; result.data[2] = data[2] / rhs.data[2]; result.data[3] = data[3] / rhs.data[3]; result.data[4] = data[4] / rhs.data[4]; result.data[5] = data[5] / rhs.data[5]; return result; } /// Scalar divide operator (2-by-3) CUTLASS_HOST_DEVICE Matrix divide(Element const &s) const { Matrix result; result.data[0] = data[0] / s; result.data[1] = data[1] / s; result.data[2] = data[2] / s; result.data[3] = data[3] / s; result.data[4] = data[4] / s; result.data[5] = data[5] / s; return result; } /// Scalar divide operator (2-by-3) CUTLASS_HOST_DEVICE Matrix operator /(Element const &s) const { return divide(s); } /// Scalar divide operator (2-by-3) CUTLASS_HOST_DEVICE Matrix & operator /=(Element const &s) { data[0] /= s; data[1] /= s; data[2] /= s; data[3] /= s; data[4] /= s; data[5] /= s; return *this; } /// Elementwise divide operator (2-by-3) CUTLASS_HOST_DEVICE Matrix operator /(Matrix const &rhs) const { return divide(rhs); } /// Elementwise divide operator (2-by-3) CUTLASS_HOST_DEVICE Matrix & operator /=(Matrix const &rhs) { data[0] /= rhs.data[0]; data[1] /= rhs.data[1]; data[2] /= rhs.data[2]; data[3] /= rhs.data[3]; data[4] /= rhs.data[4]; data[5] /= rhs.data[5]; return *this; } /// Negates each element of the matrix CUTLASS_HOST_DEVICE Matrix operator-() const { Matrix m; m.data[0] = -m.data[0]; m.data[1] = 
-m.data[1]; m.data[2] = -m.data[2]; m.data[3] = -m.data[3]; m.data[4] = -m.data[4]; m.data[5] = -m.data[5]; return m; } /// Matrix product of size 2-by-1-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> product( Matrix<Element, 3, 1> const &rhs, Matrix<Element, 2, 1> accum = Matrix<Element, 2, 1>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[3] * rhs.data[0]; // k=1 accum.data[0] += data[1] * rhs.data[1]; accum.data[1] += data[4] * rhs.data[1]; // k=2 accum.data[0] += data[2] * rhs.data[2]; accum.data[1] += data[5] * rhs.data[2]; return accum; } /// Matrix product of size 2-by-1-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> operator*(Matrix<Element, 3, 1> const &rhs) const { return product(rhs); } /// Matrix product of size 2-by-2-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 2, 2> product( Matrix<Element, 3, 2> const &rhs, Matrix<Element, 2, 2> accum = Matrix<Element, 2, 2>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[3] * rhs.data[0]; accum.data[3] += data[3] * rhs.data[1]; // k=1 accum.data[0] += data[1] * rhs.data[2]; accum.data[1] += data[1] * rhs.data[3]; accum.data[2] += data[4] * rhs.data[2]; accum.data[3] += data[4] * rhs.data[3]; // k=2 accum.data[0] += data[2] * rhs.data[4]; accum.data[1] += data[2] * rhs.data[5]; accum.data[2] += data[5] * rhs.data[4]; accum.data[3] += data[5] * rhs.data[5]; return accum; } /// Matrix product of size 2-by-2-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 2, 2> operator*(Matrix<Element, 3, 2> const &rhs) const { return product(rhs); } /// Matrix product of size 2-by-3-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 2, 3> product( Matrix<Element, 3, 3> const &rhs, Matrix<Element, 2, 3> accum = Matrix<Element, 2, 3>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[3] * rhs.data[0]; accum.data[4] += data[3] * rhs.data[1]; accum.data[5] += data[3] * rhs.data[2]; // k=1 accum.data[0] += data[1] * rhs.data[3]; accum.data[1] += data[1] * rhs.data[4]; accum.data[2] += data[1] * rhs.data[5]; accum.data[3] += data[4] * rhs.data[3]; accum.data[4] += data[4] * rhs.data[4]; accum.data[5] += data[4] * rhs.data[5]; // k=2 accum.data[0] += data[2] * rhs.data[6]; accum.data[1] += data[2] * rhs.data[7]; accum.data[2] += data[2] * rhs.data[8]; accum.data[3] += data[5] * rhs.data[6]; accum.data[4] += data[5] * rhs.data[7]; accum.data[5] += data[5] * rhs.data[8]; return accum; } /// Matrix product of size 2-by-3-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 2, 3> operator*(Matrix<Element, 3, 3> const &rhs) const { return product(rhs); } /// Matrix product of size 2-by-3-by-3 CUTLASS_HOST_DEVICE Matrix & operator*=(Matrix<Element, 3, 3> const &rhs) { *this = product(rhs); return *this; } /// Matrix product of size 2-by-4-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 2, 4> product( Matrix<Element, 3, 4> const &rhs, Matrix<Element, 2, 4> accum = Matrix<Element, 2, 4>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[0] * rhs.data[3]; accum.data[4] += data[3] * rhs.data[0]; accum.data[5] += data[3] * rhs.data[1]; accum.data[6] += data[3] * rhs.data[2]; accum.data[7] += data[3] * rhs.data[3]; // k=1 accum.data[0] += data[1] * rhs.data[4]; accum.data[1] += data[1] * rhs.data[5]; accum.data[2] += data[1] * rhs.data[6]; accum.data[3] += data[1] * rhs.data[7]; accum.data[4] += 
data[4] * rhs.data[4]; accum.data[5] += data[4] * rhs.data[5]; accum.data[6] += data[4] * rhs.data[6]; accum.data[7] += data[4] * rhs.data[7]; // k=2 accum.data[0] += data[2] * rhs.data[8]; accum.data[1] += data[2] * rhs.data[9]; accum.data[2] += data[2] * rhs.data[10]; accum.data[3] += data[2] * rhs.data[11]; accum.data[4] += data[5] * rhs.data[8]; accum.data[5] += data[5] * rhs.data[9]; accum.data[6] += data[5] * rhs.data[10]; accum.data[7] += data[5] * rhs.data[11]; return accum; } /// Matrix product of size 2-by-4-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 2, 4> operator*(Matrix<Element, 3, 4> const &rhs) const { return product(rhs); } /// Returns the sum of elements CUTLASS_HOST_DEVICE Element sum(Element accum = Element()) const { accum += data[0]; accum += data[1]; accum += data[2]; accum += data[3]; accum += data[4]; accum += data[5]; return accum; } /// Returns the sum of squared elements CUTLASS_HOST_DEVICE Element norm(Element accum = Element()) const { accum += data[0] * data[0]; accum += data[1] * data[1]; accum += data[2] * data[2]; accum += data[3] * data[3]; accum += data[4] * data[4]; accum += data[5] * data[5]; return accum; } /// Returns square root of the norm CUTLASS_HOST_DEVICE Element magnitude() const { return fast_sqrt(norm()); } /// Returns the sum of diagonal elements CUTLASS_HOST_DEVICE Element trace(Element accum = Element()) const { accum += data[0]; accum += data[4]; return accum; } }; /// Template alias for 2-by-3 matrix template <typename Element> using Matrix2x3 = Matrix<Element, 2, 3>; /// Free funciton to infer element type from template arguments template <typename Element> CUTLASS_HOST_DEVICE Matrix2x3<Element> make_Matrix2x3( Element _0_0, Element _0_1, Element _0_2, Element _1_0, Element _1_1, Element _1_2 ) { return Matrix2x3<Element>( _0_0, _0_1, _0_2, _1_0, _1_1, _1_2 ); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// 2-by-4 matrix template class definition template <typename Element_> struct Matrix<Element_, 2, 4> { // // Type definitions // /// Element data type using Element = Element_; /// Number of rows in matrix static int const kRows = 2; /// Number of columns in matrix static int const kColumns = 4; /// Layout of matrix in underlying array using Layout = layout::RowMajor; /// Number of elements in matrix static int const kCount = 8; // // Data members // /// Elements of the matrix in row-major layout Array<Element, kCount> data; // // Methods // /// Constructs a zero matrix CUTLASS_HOST_DEVICE Matrix() { data.clear(); } /// Copy constructor for a 2-by-4 matrix CUTLASS_HOST_DEVICE Matrix(Matrix const &rhs) { data = rhs.data; } /// Constucts a 2-by-4 matrix from scalar elements CUTLASS_HOST_DEVICE Matrix( Element _0_0, Element _0_1, Element _0_2, Element _0_3, Element _1_0, Element _1_1, Element _1_2, Element _1_3 ) { data[0] = _0_0; data[1] = _0_1; data[2] = _0_2; data[3] = _0_3; data[4] = _1_0; data[5] = _1_1; data[6] = _1_2; data[7] = _1_3; } /// Constucts a 2-by-4 matrix from row vectors CUTLASS_HOST_DEVICE Matrix( Matrix<Element, 1, 4> const &row_0, Matrix<Element, 1, 4> const &row_1 ) { data[0] = row_0.data[0]; data[1] = row_0.data[1]; data[2] = row_0.data[2]; data[3] = row_0.data[3]; data[4] = row_1.data[0]; data[5] = row_1.data[1]; data[6] = row_1.data[2]; data[7] = row_1.data[3]; } /// Static method to construct a 2-by-4 matrix from column vectors CUTLASS_HOST_DEVICE static Matrix from_columns( Matrix<Element, 4, 1> const &column_0, Matrix<Element, 4, 1> const &column_1, 
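/// Illustrative usage sketch for the 2-by-3 products and reductions above (assumes
/// float elements; names are chosen for illustration only):
///
/// \code
/// cutlass::Matrix2x3<float> A = cutlass::Matrix2x3<float>::ones();
/// cutlass::Matrix<float, 3, 1> x(1.0f, 2.0f, 3.0f);
/// cutlass::Matrix<float, 2, 1> y = A * x;            // each entry is 1 + 2 + 3 = 6
/// float total = A.sum();                             // 6
/// \endcode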
Matrix<Element, 4, 1> const &column_2, Matrix<Element, 4, 1> const &column_3 ) { Matrix result; result.data[0] = column_0.data[0]; result.data[1] = column_1.data[0]; result.data[2] = column_2.data[0]; result.data[3] = column_3.data[0]; result.data[4] = column_0.data[1]; result.data[5] = column_1.data[1]; result.data[6] = column_2.data[1]; result.data[7] = column_3.data[1]; return result; } /// Constructs a matrix from a uniform element CUTLASS_HOST_DEVICE static Matrix uniform(Element s) { Matrix m; m.data[0] = s; m.data[1] = s; m.data[2] = s; m.data[3] = s; m.data[4] = s; m.data[5] = s; m.data[6] = s; m.data[7] = s; return m; } /// Constructs a matrix from a uniform element 1 CUTLASS_HOST_DEVICE static Matrix ones() { return uniform(Element(1)); } /// Constructs a matrix from a uniform element 0 CUTLASS_HOST_DEVICE static Matrix zero() { return Matrix(); } /// Constructs a matrix from elements along its diagonal CUTLASS_HOST_DEVICE static Matrix from_diagonal(Matrix<Element, 2, 1> const &diag) { Matrix m; m.data[0] = diag.data[0]; m.data[3] = diag.data[1]; return m; } /// Constructs a matrix from elements along its diagonal CUTLASS_HOST_DEVICE static Matrix from_diagonal(Matrix<Element, 1, 2> const &diag) { Matrix m; m.data[0] = diag.data[0]; m.data[3] = diag.data[1]; return m; } /// Gets an array of diagonal elements CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> diagonal() const { Matrix<Element, 2, 1> diag; diag.data[0] = data[0]; diag.data[1] = data[3]; return diag; } /// Returns a transposed matrix CUTLASS_HOST_DEVICE Matrix<Element, 4, 2> transpose() const { Matrix<Element, 4, 2> mt; mt.data[0] = data[0]; mt.data[2] = data[1]; mt.data[4] = data[2]; mt.data[6] = data[3]; mt.data[1] = data[4]; mt.data[3] = data[5]; mt.data[5] = data[6]; mt.data[7] = data[7]; return mt; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(int i, int j) const { return data[i * 2 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(int i, int j) { return data[i * 2 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element &at(int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element at(int offset) const { return data[offset]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element operator[](Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & operator[](Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element & operator[](int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element operator[](int offset) const { return data[offset]; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const { Matrix<Element, 1, 2> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 1, 3> 
slice_1x3(int i = 0, int j = 0) const { Matrix<Element, 1, 3> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 2]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 2] = m.data[2]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 1, 4> slice_1x4(int i = 0, int j = 0) const { Matrix<Element, 1, 4> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 2]; m.data[3] = data[i * 4 + j + 3]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x4(Matrix<Element, 1, 4> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 2] = m.data[2]; data[i * 4 + j + 3] = m.data[3]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 1, 4> row(int i) const { return slice_1x4(i, 0); } CUTLASS_HOST_DEVICE Matrix &set_row(Matrix<Element, 1, 4> const &v, int i = 0) { return set_slice_1x4(v, i, 0); } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const { Matrix<Element, 2, 1> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 4]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 4] = m.data[1]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> column(int j) const { return slice_2x1(0, j); } CUTLASS_HOST_DEVICE Matrix &set_column(Matrix<Element, 2, 1> const &v, int j =0) { return set_slice_2x1(v, 0, j); } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const { Matrix<Element, 2, 2> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 4]; m.data[3] = data[i * 4 + j + 5]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 4] = m.data[2]; data[i * 4 + j + 5] = m.data[3]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 3> slice_2x3(int i = 0, int j = 0) const { Matrix<Element, 2, 3> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 2]; m.data[3] = data[i * 4 + j + 4]; m.data[4] = data[i * 4 + j + 5]; m.data[5] = data[i * 4 + j + 6]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x3(Matrix<Element, 2, 3> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 2] = m.data[2]; data[i * 4 + j + 4] = m.data[3]; data[i * 4 + j + 5] = m.data[4]; data[i * 4 + j + 6] = m.data[5]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 4> slice_2x4(int i = 0, int j = 0) const { Matrix<Element, 2, 4> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 2]; m.data[3] = data[i * 4 + j + 3]; m.data[4] = data[i * 4 + j + 4]; m.data[5] = data[i * 4 + j + 5]; m.data[6] 
= data[i * 4 + j + 6]; m.data[7] = data[i * 4 + j + 7]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x4(Matrix<Element, 2, 4> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 2] = m.data[2]; data[i * 4 + j + 3] = m.data[3]; data[i * 4 + j + 4] = m.data[4]; data[i * 4 + j + 5] = m.data[5]; data[i * 4 + j + 6] = m.data[6]; data[i * 4 + j + 7] = m.data[7]; return *this; } /// Forms a 2-by-4 matrix by horizontally concatenating a 2-by-1 matrix with a 2-by-3 matrix CUTLASS_HOST_DEVICE static Matrix hcat(Matrix<Element, 2, 1> const & lhs, Matrix<Element, 2, 3> const & rhs) { return Matrix( lhs.at(0, 0), rhs.at(0, 0), rhs.at(0, 1), rhs.at(0, 2) , lhs.at(1, 0), rhs.at(1, 0), rhs.at(1, 1), rhs.at(1, 2)); } /// Forms a 2-by-4 matrix by horizontally concatenating a 2-by-2 matrix with a 2-by-2 matrix CUTLASS_HOST_DEVICE static Matrix hcat(Matrix<Element, 2, 2> const & lhs, Matrix<Element, 2, 2> const & rhs) { return Matrix( lhs.at(0, 0), lhs.at(0, 1), rhs.at(0, 0), rhs.at(0, 1) , lhs.at(1, 0), lhs.at(1, 1), rhs.at(1, 0), rhs.at(1, 1)); } /// Forms a 2-by-4 matrix by horizontally concatenating a 2-by-3 matrix with a 2-by-1 matrix CUTLASS_HOST_DEVICE static Matrix hcat(Matrix<Element, 2, 3> const & lhs, Matrix<Element, 2, 1> const & rhs) { return Matrix( lhs.at(0, 0), lhs.at(0, 1), lhs.at(0, 2), rhs.at(0, 0) , lhs.at(1, 0), lhs.at(1, 1), lhs.at(1, 2), rhs.at(1, 0)); } /// Forms a 2-by-4 matrix by vertically concatenating a 1-by-4 matrix with a 1-by-4 matrix CUTLASS_HOST_DEVICE static Matrix vcat(Matrix<Element, 1, 4> const & upper, Matrix<Element, 1, 4> const & lower) { return Matrix( upper.at(0, 0), upper.at(0, 1), upper.at(0, 2), upper.at(0, 3) , lower.at(0, 0), lower.at(0, 1), lower.at(0, 2), lower.at(0, 3)); } /// Concatenates this matrix with a a 1-by-4 matrix to form a 3-by-4 matrix CUTLASS_HOST_DEVICE Matrix<Element, 3, 4> vcat(Matrix<Element, 1, 4> const & rhs) const { return Matrix<Element, 3, 4>::vcat(*this, rhs); } /// Concatenates this matrix with a a 2-by-4 matrix to form a 4-by-4 matrix CUTLASS_HOST_DEVICE Matrix<Element, 4, 4> vcat(Matrix<Element, 2, 4> const & rhs) const { return Matrix<Element, 4, 4>::vcat(*this, rhs); } /// Forms a 2-by-4 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Element A, Matrix<Element, 1, 3> const & B, Element C, Matrix<Element, 1, 3> const & D) { return Matrix( A, B.at(0, 0), B.at(0, 1), B.at(0, 2) , C, D.at(0, 0), D.at(0, 1), D.at(0, 2) ); } /// Forms a 2-by-4 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 1, 2> const & A, Matrix<Element, 1, 2> const & B, Matrix<Element, 1, 2> const & C, Matrix<Element, 1, 2> const & D) { return Matrix( A.at(0, 0), A.at(0, 1), B.at(0, 0), B.at(0, 1) , C.at(0, 0), C.at(0, 1), D.at(0, 0), D.at(0, 1) ); } /// Forms a 2-by-4 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 1, 3> const & A, Element B, Matrix<Element, 1, 3> const & C, Element D) { return Matrix( A.at(0, 0), A.at(0, 1), A.at(0, 2), B , C.at(0, 0), C.at(0, 1), C.at(0, 2), D ); } /// Elementwise add operator (2-by-4) CUTLASS_HOST_DEVICE Matrix add(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] + rhs.data[0]; result.data[1] = data[1] + rhs.data[1]; result.data[2] = data[2] + rhs.data[2]; result.data[3] = data[3] + rhs.data[3]; result.data[4] = data[4] + rhs.data[4]; result.data[5] = data[5] + 
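/// Illustrative usage sketch for building a 2-by-4 matrix by concatenation (assumes
/// float elements; names are chosen for illustration only):
///
/// \code
/// cutlass::Matrix2x2<float> A = cutlass::Matrix2x2<float>::identity();
/// cutlass::Matrix2x2<float> B = cutlass::Matrix2x2<float>::uniform(2.0f);
/// cutlass::Matrix2x4<float> M = cutlass::Matrix2x4<float>::hcat(A, B);   // [A | B]
/// cutlass::Matrix<float, 4, 4> S = M.vcat(M);        // stack two copies vertically
/// \endcode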
rhs.data[5]; result.data[6] = data[6] + rhs.data[6]; result.data[7] = data[7] + rhs.data[7]; return result; } /// Elementwise add operator (2-by-4) CUTLASS_HOST_DEVICE Matrix operator +(Matrix const &rhs) const { return add(rhs); } /// Elementwise add operator (2-by-4) CUTLASS_HOST_DEVICE Matrix & operator +=(Matrix const &rhs) { data[0] += rhs.data[0]; data[1] += rhs.data[1]; data[2] += rhs.data[2]; data[3] += rhs.data[3]; data[4] += rhs.data[4]; data[5] += rhs.data[5]; data[6] += rhs.data[6]; data[7] += rhs.data[7]; return *this; } /// Elementwise subtract operator (2-by-4) CUTLASS_HOST_DEVICE Matrix subtract(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] - rhs.data[0]; result.data[1] = data[1] - rhs.data[1]; result.data[2] = data[2] - rhs.data[2]; result.data[3] = data[3] - rhs.data[3]; result.data[4] = data[4] - rhs.data[4]; result.data[5] = data[5] - rhs.data[5]; result.data[6] = data[6] - rhs.data[6]; result.data[7] = data[7] - rhs.data[7]; return result; } /// Elementwise subtract operator (2-by-4) CUTLASS_HOST_DEVICE Matrix operator -(Matrix const &rhs) const { return subtract(rhs); } /// Elementwise subtract operator (2-by-4) CUTLASS_HOST_DEVICE Matrix & operator -=(Matrix const &rhs) { data[0] -= rhs.data[0]; data[1] -= rhs.data[1]; data[2] -= rhs.data[2]; data[3] -= rhs.data[3]; data[4] -= rhs.data[4]; data[5] -= rhs.data[5]; data[6] -= rhs.data[6]; data[7] -= rhs.data[7]; return *this; } /// Elementwise multiply operator (2-by-4) CUTLASS_HOST_DEVICE Matrix multiply(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] * rhs.data[0]; result.data[1] = data[1] * rhs.data[1]; result.data[2] = data[2] * rhs.data[2]; result.data[3] = data[3] * rhs.data[3]; result.data[4] = data[4] * rhs.data[4]; result.data[5] = data[5] * rhs.data[5]; result.data[6] = data[6] * rhs.data[6]; result.data[7] = data[7] * rhs.data[7]; return result; } /// Scalar multiply operator (2-by-4) CUTLASS_HOST_DEVICE Matrix multiply(Element const &s) const { Matrix result; result.data[0] = data[0] * s; result.data[1] = data[1] * s; result.data[2] = data[2] * s; result.data[3] = data[3] * s; result.data[4] = data[4] * s; result.data[5] = data[5] * s; result.data[6] = data[6] * s; result.data[7] = data[7] * s; return result; } /// Scalar multiply operator (2-by-4) CUTLASS_HOST_DEVICE Matrix operator *(Element const &s) const { return multiply(s); } /// Scalar multiply operator (2-by-4) CUTLASS_HOST_DEVICE Matrix & operator *=(Element const &s) { data[0] *= s; data[1] *= s; data[2] *= s; data[3] *= s; data[4] *= s; data[5] *= s; data[6] *= s; data[7] *= s; return *this; } /// Elementwise divide operator (2-by-4) CUTLASS_HOST_DEVICE Matrix divide(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] / rhs.data[0]; result.data[1] = data[1] / rhs.data[1]; result.data[2] = data[2] / rhs.data[2]; result.data[3] = data[3] / rhs.data[3]; result.data[4] = data[4] / rhs.data[4]; result.data[5] = data[5] / rhs.data[5]; result.data[6] = data[6] / rhs.data[6]; result.data[7] = data[7] / rhs.data[7]; return result; } /// Scalar divide operator (2-by-4) CUTLASS_HOST_DEVICE Matrix divide(Element const &s) const { Matrix result; result.data[0] = data[0] / s; result.data[1] = data[1] / s; result.data[2] = data[2] / s; result.data[3] = data[3] / s; result.data[4] = data[4] / s; result.data[5] = data[5] / s; result.data[6] = data[6] / s; result.data[7] = data[7] / s; return result; } /// Scalar divide operator (2-by-4) CUTLASS_HOST_DEVICE Matrix operator /(Element const &s) const { return 
divide(s); } /// Scalar divide operator (2-by-4) CUTLASS_HOST_DEVICE Matrix & operator /=(Element const &s) { data[0] /= s; data[1] /= s; data[2] /= s; data[3] /= s; data[4] /= s; data[5] /= s; data[6] /= s; data[7] /= s; return *this; } /// Elementwise divide operator (2-by-4) CUTLASS_HOST_DEVICE Matrix operator /(Matrix const &rhs) const { return divide(rhs); } /// Elementwise divide operator (2-by-4) CUTLASS_HOST_DEVICE Matrix & operator /=(Matrix const &rhs) { data[0] /= rhs.data[0]; data[1] /= rhs.data[1]; data[2] /= rhs.data[2]; data[3] /= rhs.data[3]; data[4] /= rhs.data[4]; data[5] /= rhs.data[5]; data[6] /= rhs.data[6]; data[7] /= rhs.data[7]; return *this; } /// Negates each element of the matrix CUTLASS_HOST_DEVICE Matrix operator-() const { Matrix m; m.data[0] = -m.data[0]; m.data[1] = -m.data[1]; m.data[2] = -m.data[2]; m.data[3] = -m.data[3]; m.data[4] = -m.data[4]; m.data[5] = -m.data[5]; m.data[6] = -m.data[6]; m.data[7] = -m.data[7]; return m; } /// Matrix product of size 2-by-1-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> product( Matrix<Element, 4, 1> const &rhs, Matrix<Element, 2, 1> accum = Matrix<Element, 2, 1>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[4] * rhs.data[0]; // k=1 accum.data[0] += data[1] * rhs.data[1]; accum.data[1] += data[5] * rhs.data[1]; // k=2 accum.data[0] += data[2] * rhs.data[2]; accum.data[1] += data[6] * rhs.data[2]; // k=3 accum.data[0] += data[3] * rhs.data[3]; accum.data[1] += data[7] * rhs.data[3]; return accum; } /// Matrix product of size 2-by-1-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> operator*(Matrix<Element, 4, 1> const &rhs) const { return product(rhs); } /// Matrix product of size 2-by-2-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 2, 2> product( Matrix<Element, 4, 2> const &rhs, Matrix<Element, 2, 2> accum = Matrix<Element, 2, 2>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[4] * rhs.data[0]; accum.data[3] += data[4] * rhs.data[1]; // k=1 accum.data[0] += data[1] * rhs.data[2]; accum.data[1] += data[1] * rhs.data[3]; accum.data[2] += data[5] * rhs.data[2]; accum.data[3] += data[5] * rhs.data[3]; // k=2 accum.data[0] += data[2] * rhs.data[4]; accum.data[1] += data[2] * rhs.data[5]; accum.data[2] += data[6] * rhs.data[4]; accum.data[3] += data[6] * rhs.data[5]; // k=3 accum.data[0] += data[3] * rhs.data[6]; accum.data[1] += data[3] * rhs.data[7]; accum.data[2] += data[7] * rhs.data[6]; accum.data[3] += data[7] * rhs.data[7]; return accum; } /// Matrix product of size 2-by-2-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 2, 2> operator*(Matrix<Element, 4, 2> const &rhs) const { return product(rhs); } /// Matrix product of size 2-by-3-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 2, 3> product( Matrix<Element, 4, 3> const &rhs, Matrix<Element, 2, 3> accum = Matrix<Element, 2, 3>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[4] * rhs.data[0]; accum.data[4] += data[4] * rhs.data[1]; accum.data[5] += data[4] * rhs.data[2]; // k=1 accum.data[0] += data[1] * rhs.data[3]; accum.data[1] += data[1] * rhs.data[4]; accum.data[2] += data[1] * rhs.data[5]; accum.data[3] += data[5] * rhs.data[3]; accum.data[4] += data[5] * rhs.data[4]; accum.data[5] += data[5] * rhs.data[5]; // k=2 accum.data[0] += data[2] * rhs.data[6]; accum.data[1] += data[2] * rhs.data[7]; accum.data[2] += data[2] * rhs.data[8]; accum.data[3] += data[6] * 
rhs.data[6]; accum.data[4] += data[6] * rhs.data[7]; accum.data[5] += data[6] * rhs.data[8]; // k=3 accum.data[0] += data[3] * rhs.data[9]; accum.data[1] += data[3] * rhs.data[10]; accum.data[2] += data[3] * rhs.data[11]; accum.data[3] += data[7] * rhs.data[9]; accum.data[4] += data[7] * rhs.data[10]; accum.data[5] += data[7] * rhs.data[11]; return accum; } /// Matrix product of size 2-by-3-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 2, 3> operator*(Matrix<Element, 4, 3> const &rhs) const { return product(rhs); } /// Matrix product of size 2-by-4-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 2, 4> product( Matrix<Element, 4, 4> const &rhs, Matrix<Element, 2, 4> accum = Matrix<Element, 2, 4>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[0] * rhs.data[3]; accum.data[4] += data[4] * rhs.data[0]; accum.data[5] += data[4] * rhs.data[1]; accum.data[6] += data[4] * rhs.data[2]; accum.data[7] += data[4] * rhs.data[3]; // k=1 accum.data[0] += data[1] * rhs.data[4]; accum.data[1] += data[1] * rhs.data[5]; accum.data[2] += data[1] * rhs.data[6]; accum.data[3] += data[1] * rhs.data[7]; accum.data[4] += data[5] * rhs.data[4]; accum.data[5] += data[5] * rhs.data[5]; accum.data[6] += data[5] * rhs.data[6]; accum.data[7] += data[5] * rhs.data[7]; // k=2 accum.data[0] += data[2] * rhs.data[8]; accum.data[1] += data[2] * rhs.data[9]; accum.data[2] += data[2] * rhs.data[10]; accum.data[3] += data[2] * rhs.data[11]; accum.data[4] += data[6] * rhs.data[8]; accum.data[5] += data[6] * rhs.data[9]; accum.data[6] += data[6] * rhs.data[10]; accum.data[7] += data[6] * rhs.data[11]; // k=3 accum.data[0] += data[3] * rhs.data[12]; accum.data[1] += data[3] * rhs.data[13]; accum.data[2] += data[3] * rhs.data[14]; accum.data[3] += data[3] * rhs.data[15]; accum.data[4] += data[7] * rhs.data[12]; accum.data[5] += data[7] * rhs.data[13]; accum.data[6] += data[7] * rhs.data[14]; accum.data[7] += data[7] * rhs.data[15]; return accum; } /// Matrix product of size 2-by-4-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 2, 4> operator*(Matrix<Element, 4, 4> const &rhs) const { return product(rhs); } /// Matrix product of size 2-by-4-by-4 CUTLASS_HOST_DEVICE Matrix & operator*=(Matrix<Element, 4, 4> const &rhs) { *this = product(rhs); return *this; } /// Returns the sum of elements CUTLASS_HOST_DEVICE Element sum(Element accum = Element()) const { accum += data[0]; accum += data[1]; accum += data[2]; accum += data[3]; accum += data[4]; accum += data[5]; accum += data[6]; accum += data[7]; return accum; } /// Returns the sum of squared elements CUTLASS_HOST_DEVICE Element norm(Element accum = Element()) const { accum += data[0] * data[0]; accum += data[1] * data[1]; accum += data[2] * data[2]; accum += data[3] * data[3]; accum += data[4] * data[4]; accum += data[5] * data[5]; accum += data[6] * data[6]; accum += data[7] * data[7]; return accum; } /// Returns square root of the norm CUTLASS_HOST_DEVICE Element magnitude() const { return fast_sqrt(norm()); } /// Returns the sum of diagonal elements CUTLASS_HOST_DEVICE Element trace(Element accum = Element()) const { accum += data[0]; accum += data[5]; return accum; } }; /// Template alias for 2-by-4 matrix template <typename Element> using Matrix2x4 = Matrix<Element, 2, 4>; /// Free funciton to infer element type from template arguments template <typename Element> CUTLASS_HOST_DEVICE Matrix2x4<Element> make_Matrix2x4( Element _0_0, Element _0_1, Element _0_2, Element _0_3, Element 
_1_0, Element _1_1, Element _1_2, Element _1_3 ) { return Matrix2x4<Element>( _0_0, _0_1, _0_2, _0_3, _1_0, _1_1, _1_2, _1_3 ); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// 3-by-1 matrix template class definition template <typename Element_> struct Matrix<Element_, 3, 1> { // // Type definitions // /// Element data type using Element = Element_; /// Number of rows in matrix static int const kRows = 3; /// Number of columns in matrix static int const kColumns = 1; /// Layout of matrix in underlying array using Layout = layout::RowMajor; /// Number of elements in matrix static int const kCount = 3; // // Data members // /// Elements of the matrix in row-major layout Array<Element, kCount> data; // // Methods // /// Constructs a zero matrix CUTLASS_HOST_DEVICE Matrix() { data.clear(); } /// Copy constructor for a 3-by-1 matrix CUTLASS_HOST_DEVICE Matrix(Matrix const &rhs) { data = rhs.data; } /// Constucts a 3-by-1 matrix from scalar elements CUTLASS_HOST_DEVICE Matrix( Element _0_0, Element _1_0, Element _2_0 ) { data[0] = _0_0; data[1] = _1_0; data[2] = _2_0; } /// Constructs a matrix from a uniform element CUTLASS_HOST_DEVICE static Matrix uniform(Element s) { Matrix m; m.data[0] = s; m.data[1] = s; m.data[2] = s; return m; } /// Constructs a matrix from a uniform element 1 CUTLASS_HOST_DEVICE static Matrix ones() { return uniform(Element(1)); } /// Constructs a matrix from a uniform element 0 CUTLASS_HOST_DEVICE static Matrix zero() { return Matrix(); } /// Returns a transposed matrix CUTLASS_HOST_DEVICE Matrix<Element, 1, 3> transpose() const { Matrix<Element, 1, 3> mt; mt.data[0] = data[0]; mt.data[1] = data[1]; mt.data[2] = data[2]; return mt; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(int i, int j) const { return data[i * 3 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(int i, int j) { return data[i * 3 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element &at(int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element at(int offset) const { return data[offset]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element operator[](Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & operator[](Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element & operator[](int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element operator[](int offset) const { return data[offset]; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const { Matrix<Element, 2, 1> m; m.data[0] = data[i * 1 + j + 0]; m.data[1] = data[i * 1 + j + 1]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) { data[i * 1 + j + 0] = m.data[0]; data[i * 1 + j + 1] = m.data[1]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const { Matrix<Element, 3, 1> m; 
m.data[0] = data[i * 1 + j + 0]; m.data[1] = data[i * 1 + j + 1]; m.data[2] = data[i * 1 + j + 2]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) { data[i * 1 + j + 0] = m.data[0]; data[i * 1 + j + 1] = m.data[1]; data[i * 1 + j + 2] = m.data[2]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> column(int j) const { return slice_3x1(0, j); } CUTLASS_HOST_DEVICE Matrix &set_column(Matrix<Element, 3, 1> const &v, int j =0) { return set_slice_3x1(v, 0, j); } /// Concatenates this matrix with a a 3-by-1 matrix to form a 3-by-2 matrix CUTLASS_HOST_DEVICE Matrix<Element, 3, 2> hcat(Matrix<Element, 3, 1> const & rhs) const { return Matrix<Element, 3, 2>::hcat(*this, rhs); } /// Concatenates this matrix with a a 3-by-2 matrix to form a 3-by-3 matrix CUTLASS_HOST_DEVICE Matrix<Element, 3, 3> hcat(Matrix<Element, 3, 2> const & rhs) const { return Matrix<Element, 3, 3>::hcat(*this, rhs); } /// Concatenates this matrix with a a 3-by-3 matrix to form a 3-by-4 matrix CUTLASS_HOST_DEVICE Matrix<Element, 3, 4> hcat(Matrix<Element, 3, 3> const & rhs) const { return Matrix<Element, 3, 4>::hcat(*this, rhs); } /// Forms a 3-by-1 matrix by vertically concatenating an Element with a 2-by-1 matrix CUTLASS_HOST_DEVICE static Matrix vcat(Element upper, Matrix<Element, 2, 1> const & lower) { return Matrix( upper , lower.at(0, 0) , lower.at(1, 0)); } /// Forms a 3-by-1 matrix by vertically concatenating a 2-by-1 matrix with an Element CUTLASS_HOST_DEVICE static Matrix vcat(Matrix<Element, 2, 1> const & upper, Element lower) { return Matrix( upper.at(0, 0) , upper.at(1, 0) , lower); } /// Concatenates this matrix with a an Element to form a 4-by-1 matrix CUTLASS_HOST_DEVICE Matrix<Element, 4, 1> vcat(Element rhs) const { return Matrix<Element, 4, 1>::vcat(*this, rhs); } /// Elementwise add operator (3-by-1) CUTLASS_HOST_DEVICE Matrix add(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] + rhs.data[0]; result.data[1] = data[1] + rhs.data[1]; result.data[2] = data[2] + rhs.data[2]; return result; } /// Elementwise add operator (3-by-1) CUTLASS_HOST_DEVICE Matrix operator +(Matrix const &rhs) const { return add(rhs); } /// Elementwise add operator (3-by-1) CUTLASS_HOST_DEVICE Matrix & operator +=(Matrix const &rhs) { data[0] += rhs.data[0]; data[1] += rhs.data[1]; data[2] += rhs.data[2]; return *this; } /// Elementwise subtract operator (3-by-1) CUTLASS_HOST_DEVICE Matrix subtract(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] - rhs.data[0]; result.data[1] = data[1] - rhs.data[1]; result.data[2] = data[2] - rhs.data[2]; return result; } /// Elementwise subtract operator (3-by-1) CUTLASS_HOST_DEVICE Matrix operator -(Matrix const &rhs) const { return subtract(rhs); } /// Elementwise subtract operator (3-by-1) CUTLASS_HOST_DEVICE Matrix & operator -=(Matrix const &rhs) { data[0] -= rhs.data[0]; data[1] -= rhs.data[1]; data[2] -= rhs.data[2]; return *this; } /// Elementwise multiply operator (3-by-1) CUTLASS_HOST_DEVICE Matrix multiply(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] * rhs.data[0]; result.data[1] = data[1] * rhs.data[1]; result.data[2] = data[2] * rhs.data[2]; return result; } /// Scalar multiply operator (3-by-1) CUTLASS_HOST_DEVICE Matrix multiply(Element const &s) const { Matrix result; result.data[0] = data[0] * s; result.data[1] = data[1] * s; result.data[2] = data[2] * s; return result; } /// Scalar multiply operator (3-by-1) 
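///
/// Example (illustrative only; assumes Element is float and that this header
/// is included as <cutlass/matrix.h>): scaling a 3-by-1 vector with the scalar
/// multiply defined below, then taking a dot product.
/// \code
/// cutlass::Matrix3x1<float> v = cutlass::make_Matrix3x1(1.0f, 2.0f, 3.0f);
/// cutlass::Matrix3x1<float> w = v * 2.0f;   // scalar multiply: (2, 4, 6)
/// float d = v.dot(w);                       // 1*2 + 2*4 + 3*6 = 28
/// \endcode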
CUTLASS_HOST_DEVICE Matrix operator *(Element const &s) const { return multiply(s); } /// Scalar multiply operator (3-by-1) CUTLASS_HOST_DEVICE Matrix & operator *=(Element const &s) { data[0] *= s; data[1] *= s; data[2] *= s; return *this; } /// Elementwise divide operator (3-by-1) CUTLASS_HOST_DEVICE Matrix divide(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] / rhs.data[0]; result.data[1] = data[1] / rhs.data[1]; result.data[2] = data[2] / rhs.data[2]; return result; } /// Scalar divide operator (3-by-1) CUTLASS_HOST_DEVICE Matrix divide(Element const &s) const { Matrix result; result.data[0] = data[0] / s; result.data[1] = data[1] / s; result.data[2] = data[2] / s; return result; } /// Scalar divide operator (3-by-1) CUTLASS_HOST_DEVICE Matrix operator /(Element const &s) const { return divide(s); } /// Scalar divide operator (3-by-1) CUTLASS_HOST_DEVICE Matrix & operator /=(Element const &s) { data[0] /= s; data[1] /= s; data[2] /= s; return *this; } /// Elementwise divide operator (3-by-1) CUTLASS_HOST_DEVICE Matrix operator /(Matrix const &rhs) const { return divide(rhs); } /// Elementwise divide operator (3-by-1) CUTLASS_HOST_DEVICE Matrix & operator /=(Matrix const &rhs) { data[0] /= rhs.data[0]; data[1] /= rhs.data[1]; data[2] /= rhs.data[2]; return *this; } /// Negates each element of the matrix CUTLASS_HOST_DEVICE Matrix operator-() const { Matrix m; m.data[0] = -m.data[0]; m.data[1] = -m.data[1]; m.data[2] = -m.data[2]; return m; } /// Matrix product of size 3-by-1-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> product( Matrix<Element, 1, 1> const &rhs, Matrix<Element, 3, 1> accum = Matrix<Element, 3, 1>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[1] * rhs.data[0]; accum.data[2] += data[2] * rhs.data[0]; return accum; } /// Matrix product of size 3-by-1-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> operator*(Matrix<Element, 1, 1> const &rhs) const { return product(rhs); } /// Matrix product of size 3-by-1-by-1 CUTLASS_HOST_DEVICE Matrix & operator*=(Matrix<Element, 1, 1> const &rhs) { *this = product(rhs); return *this; } /// Matrix product of size 3-by-2-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 3, 2> product( Matrix<Element, 1, 2> const &rhs, Matrix<Element, 3, 2> accum = Matrix<Element, 3, 2>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[1] * rhs.data[0]; accum.data[3] += data[1] * rhs.data[1]; accum.data[4] += data[2] * rhs.data[0]; accum.data[5] += data[2] * rhs.data[1]; return accum; } /// Matrix product of size 3-by-2-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 3, 2> operator*(Matrix<Element, 1, 2> const &rhs) const { return product(rhs); } /// Matrix product of size 3-by-3-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 3, 3> product( Matrix<Element, 1, 3> const &rhs, Matrix<Element, 3, 3> accum = Matrix<Element, 3, 3>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[1] * rhs.data[0]; accum.data[4] += data[1] * rhs.data[1]; accum.data[5] += data[1] * rhs.data[2]; accum.data[6] += data[2] * rhs.data[0]; accum.data[7] += data[2] * rhs.data[1]; accum.data[8] += data[2] * rhs.data[2]; return accum; } /// Matrix product of size 3-by-3-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 3, 3> operator*(Matrix<Element, 1, 3> const &rhs) const { return product(rhs); } /// Matrix product of size 3-by-4-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 3, 4> product( 
    Matrix<Element, 1, 4> const &rhs,
    Matrix<Element, 3, 4> accum = Matrix<Element, 3, 4>()
  ) const {

    // k=0
    accum.data[0] += data[0] * rhs.data[0];   accum.data[1] += data[0] * rhs.data[1];
    accum.data[2] += data[0] * rhs.data[2];   accum.data[3] += data[0] * rhs.data[3];
    accum.data[4] += data[1] * rhs.data[0];   accum.data[5] += data[1] * rhs.data[1];
    accum.data[6] += data[1] * rhs.data[2];   accum.data[7] += data[1] * rhs.data[3];
    accum.data[8] += data[2] * rhs.data[0];   accum.data[9] += data[2] * rhs.data[1];
    accum.data[10] += data[2] * rhs.data[2];  accum.data[11] += data[2] * rhs.data[3];

    return accum;
  }

  /// Matrix product of size 3-by-4-by-1
  CUTLASS_HOST_DEVICE
  Matrix<Element, 3, 4> operator*(Matrix<Element, 1, 4> const &rhs) const { return product(rhs); }

  /// Dot product of vectors with extent 3
  CUTLASS_HOST_DEVICE
  Element dot(Matrix<Element, 3, 1> const &rhs, Element accum = Element()) const {
    accum += data[0] * rhs.data[0];  accum += data[1] * rhs.data[1];  accum += data[2] * rhs.data[2];
    return accum;
  }

  /// Dot product of vectors with extent 3
  CUTLASS_HOST_DEVICE
  Element dot(Matrix<Element, 1, 3> const &rhs, Element accum = Element()) const {
    accum += data[0] * rhs.data[0];  accum += data[1] * rhs.data[1];  accum += data[2] * rhs.data[2];
    return accum;
  }

  /// Returns the sum of elements
  CUTLASS_HOST_DEVICE
  Element sum(Element accum = Element()) const {
    accum += data[0];  accum += data[1];  accum += data[2];
    return accum;
  }

  /// Returns the sum of squared elements
  CUTLASS_HOST_DEVICE
  Element norm(Element accum = Element()) const {
    accum += data[0] * data[0];  accum += data[1] * data[1];  accum += data[2] * data[2];
    return accum;
  }

  /// Returns square root of the norm
  CUTLASS_HOST_DEVICE
  Element magnitude() const { return fast_sqrt(norm()); }

  /// Returns the sum of diagonal elements
  CUTLASS_HOST_DEVICE
  Element trace(Element accum = Element()) const {
    accum += data[0];
    return accum;
  }

  /// Cross product
  CUTLASS_HOST_DEVICE
  Matrix cross(Matrix const &rhs) const {
    return Matrix(
      data[1] * rhs.data[2] - data[2] * rhs.data[1],
      data[2] * rhs.data[0] - data[0] * rhs.data[2],
      data[0] * rhs.data[1] - data[1] * rhs.data[0]
    );
  }

};

/// Template alias for 3-by-1 matrix
template <typename Element>
using Matrix3x1 = Matrix<Element, 3, 1>;

/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE
Matrix3x1<Element> make_Matrix3x1(
    Element _0_0,
    Element _1_0,
    Element _2_0
) {
  return Matrix3x1<Element>(
    _0_0,
    _1_0,
    _2_0
  );
}

/////////////////////////////////////////////////////////////////////////////////////////////////

/// 3-by-2 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 3, 2> {

  //
  // Type definitions
  //

  /// Element data type
  using Element = Element_;

  /// Number of rows in matrix
  static int const kRows = 3;

  /// Number of columns in matrix
  static int const kColumns = 2;

  /// Layout of matrix in underlying array
  using Layout = layout::RowMajor;

  /// Number of elements in matrix
  static int const kCount = 6;

  //
  // Data members
  //

  /// Elements of the matrix in row-major layout
  Array<Element, kCount> data;

  //
  // Methods
  //

  /// Constructs a zero matrix
  CUTLASS_HOST_DEVICE
  Matrix() {
    data.clear();
  }

  /// Copy constructor for a 3-by-2 matrix
  CUTLASS_HOST_DEVICE
  Matrix(Matrix const &rhs) {
    data = rhs.data;
  }

  /// Constructs a 3-by-2 matrix from scalar elements
  CUTLASS_HOST_DEVICE
  Matrix(
    Element _0_0, Element _0_1,
    Element _1_0, Element _1_1,
    Element _2_0, Element _2_1
  ) {
    data[0] = _0_0;  data[1] = _0_1;
    data[2] = _1_0;  data[3] = _1_1;
    data[4] = _2_0;  data[5] = _2_1;
  }

  /// Constructs a 3-by-2 matrix from row vectors
  CUTLASS_HOST_DEVICE
  Matrix(
    Matrix<Element, 1, 2> const &row_0,
    Matrix<Element, 1, 2> const &row_1,
    Matrix<Element, 1, 2> const &row_2
  ) {
    data[0] = row_0.data[0];  data[1] = row_0.data[1];
    data[2] = row_1.data[0];  data[3] = row_1.data[1];
    data[4] = row_2.data[0];  data[5] = row_2.data[1];
  }

  /// Static method to construct a 3-by-2 matrix from column vectors
  CUTLASS_HOST_DEVICE
  static Matrix from_columns(
    Matrix<Element, 3, 1> const &column_0,
    Matrix<Element, 3, 1> const &column_1
  ) {
    Matrix result;
    result.data[0] = column_0.data[0];  result.data[1] = column_1.data[0];
    result.data[2] = column_0.data[1];  result.data[3] = column_1.data[1];
    result.data[4] = column_0.data[2];  result.data[5] = column_1.data[2];
    return result;
  }

  /// Constructs a matrix from a uniform element
  CUTLASS_HOST_DEVICE
  static Matrix uniform(Element s) {
    Matrix m;
    m.data[0] = s;  m.data[1] = s;
    m.data[2] = s;  m.data[3] = s;
    m.data[4] = s;  m.data[5] = s;
    return m;
  }

  /// Constructs a matrix from a uniform element 1
  CUTLASS_HOST_DEVICE
  static Matrix ones() { return uniform(Element(1)); }

  /// Constructs a matrix from a uniform element 0
  CUTLASS_HOST_DEVICE
  static Matrix zero() { return Matrix(); }

  /// Constructs a matrix from elements along its diagonal
  CUTLASS_HOST_DEVICE
  static Matrix from_diagonal(Matrix<Element, 2, 1> const &diag) {
    Matrix m;
    m.data[0] = diag.data[0];
    m.data[3] = diag.data[1];
    return m;
  }

  /// Constructs a matrix from elements along its diagonal
  CUTLASS_HOST_DEVICE
  static Matrix from_diagonal(Matrix<Element, 1, 2> const &diag) {
    Matrix m;
    m.data[0] = diag.data[0];
    m.data[3] = diag.data[1];
    return m;
  }

  /// Gets an array of diagonal elements
  CUTLASS_HOST_DEVICE
  Matrix<Element, 2, 1> diagonal() const {
    Matrix<Element, 2, 1> diag;
    diag.data[0] = data[0];
    diag.data[1] = data[3];
    return diag;
  }

  /// Returns a transposed matrix
  CUTLASS_HOST_DEVICE
  Matrix<Element, 2, 3> transpose() const {
    Matrix<Element, 2, 3> mt;
    mt.data[0] = data[0];  mt.data[3] = data[1];
    mt.data[1] = data[2];  mt.data[4] = data[3];
    mt.data[2] = data[4];  mt.data[5] = data[5];
    return mt;
  }

  /// Accesses an element by coordinate
  CUTLASS_HOST_DEVICE
  Element at(int i, int j) const { return data[i * 2 + j]; }

  /// Accesses an element by coordinate
  CUTLASS_HOST_DEVICE
  Element & at(int i, int j) { return data[i * 2 + j]; }

  /// Accesses an element by coordinate
  CUTLASS_HOST_DEVICE
  Element at(Coord<2> const &coord) const { return at(coord[0], coord[1]); }

  /// Accesses an element by coordinate
  CUTLASS_HOST_DEVICE
  Element & at(Coord<2> const &coord) { return at(coord[0], coord[1]); }

  /// Accesses an element by offset
  CUTLASS_HOST_DEVICE
  Element &at(int offset) { return data[offset]; }

  /// Accesses an element by offset
  CUTLASS_HOST_DEVICE
  Element at(int offset) const { return data[offset]; }

  /// Accesses an element by coordinate
  CUTLASS_HOST_DEVICE
  Element operator[](Coord<2> const &coord) const { return at(coord[0], coord[1]); }

  /// Accesses an element by coordinate
  CUTLASS_HOST_DEVICE
  Element & operator[](Coord<2> const &coord) { return at(coord[0], coord[1]); }

  /// Accesses an element by offset
  CUTLASS_HOST_DEVICE
  Element & operator[](int offset) { return data[offset]; }

  /// Accesses an element by offset
  CUTLASS_HOST_DEVICE
  Element operator[](int offset) const { return data[offset]; }

  /// Gets a submatrix with optional offset
  CUTLASS_HOST_DEVICE
  Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m; m.data[0] = data[i * 2 + j + 0]; m.data[1] = data[i * 2 + j + 1]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) { data[i * 2 + j + 0] = m.data[0]; data[i * 2 + j + 1] = m.data[1]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 1, 2> row(int i) const { return slice_1x2(i, 0); } CUTLASS_HOST_DEVICE Matrix &set_row(Matrix<Element, 1, 2> const &v, int i = 0) { return set_slice_1x2(v, i, 0); } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const { Matrix<Element, 2, 1> m; m.data[0] = data[i * 2 + j + 0]; m.data[1] = data[i * 2 + j + 2]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) { data[i * 2 + j + 0] = m.data[0]; data[i * 2 + j + 2] = m.data[1]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const { Matrix<Element, 2, 2> m; m.data[0] = data[i * 2 + j + 0]; m.data[1] = data[i * 2 + j + 1]; m.data[2] = data[i * 2 + j + 2]; m.data[3] = data[i * 2 + j + 3]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) { data[i * 2 + j + 0] = m.data[0]; data[i * 2 + j + 1] = m.data[1]; data[i * 2 + j + 2] = m.data[2]; data[i * 2 + j + 3] = m.data[3]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const { Matrix<Element, 3, 1> m; m.data[0] = data[i * 2 + j + 0]; m.data[1] = data[i * 2 + j + 2]; m.data[2] = data[i * 2 + j + 4]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) { data[i * 2 + j + 0] = m.data[0]; data[i * 2 + j + 2] = m.data[1]; data[i * 2 + j + 4] = m.data[2]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> column(int j) const { return slice_3x1(0, j); } CUTLASS_HOST_DEVICE Matrix &set_column(Matrix<Element, 3, 1> const &v, int j =0) { return set_slice_3x1(v, 0, j); } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 3, 2> slice_3x2(int i = 0, int j = 0) const { Matrix<Element, 3, 2> m; m.data[0] = data[i * 2 + j + 0]; m.data[1] = data[i * 2 + j + 1]; m.data[2] = data[i * 2 + j + 2]; m.data[3] = data[i * 2 + j + 3]; m.data[4] = data[i * 2 + j + 4]; m.data[5] = data[i * 2 + j + 5]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_3x2(Matrix<Element, 3, 2> const &m, int i = 0, int j = 0) { data[i * 2 + j + 0] = m.data[0]; data[i * 2 + j + 1] = m.data[1]; data[i * 2 + j + 2] = m.data[2]; data[i * 2 + j + 3] = m.data[3]; data[i * 2 + j + 4] = m.data[4]; data[i * 2 + j + 5] = m.data[5]; return *this; } /// Forms a 3-by-2 matrix by horizontally concatenating a 3-by-1 matrix with a 3-by-1 matrix CUTLASS_HOST_DEVICE static Matrix hcat(Matrix<Element, 3, 1> const & lhs, Matrix<Element, 3, 1> const & rhs) { return Matrix( lhs.at(0, 0), rhs.at(0, 0) , lhs.at(1, 0), rhs.at(1, 0) , lhs.at(2, 0), rhs.at(2, 0)); } /// Concatenates this matrix with a a 3-by-1 matrix to form a 3-by-3 matrix CUTLASS_HOST_DEVICE Matrix<Element, 3, 3> hcat(Matrix<Element, 3, 1> const & rhs) const { return Matrix<Element, 3, 3>::hcat(*this, rhs); } /// 
Concatenates this matrix with a a 3-by-2 matrix to form a 3-by-4 matrix CUTLASS_HOST_DEVICE Matrix<Element, 3, 4> hcat(Matrix<Element, 3, 2> const & rhs) const { return Matrix<Element, 3, 4>::hcat(*this, rhs); } /// Forms a 3-by-2 matrix by vertically concatenating a 1-by-2 matrix with a 2-by-2 matrix CUTLASS_HOST_DEVICE static Matrix vcat(Matrix<Element, 1, 2> const & upper, Matrix<Element, 2, 2> const & lower) { return Matrix( upper.at(0, 0), upper.at(0, 1) , lower.at(0, 0), lower.at(0, 1) , lower.at(1, 0), lower.at(1, 1)); } /// Forms a 3-by-2 matrix by vertically concatenating a 2-by-2 matrix with a 1-by-2 matrix CUTLASS_HOST_DEVICE static Matrix vcat(Matrix<Element, 2, 2> const & upper, Matrix<Element, 1, 2> const & lower) { return Matrix( upper.at(0, 0), upper.at(0, 1) , upper.at(1, 0), upper.at(1, 1) , lower.at(0, 0), lower.at(0, 1)); } /// Concatenates this matrix with a a 1-by-2 matrix to form a 4-by-2 matrix CUTLASS_HOST_DEVICE Matrix<Element, 4, 2> vcat(Matrix<Element, 1, 2> const & rhs) const { return Matrix<Element, 4, 2>::vcat(*this, rhs); } /// Forms a 3-by-2 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Element A, Element B, Matrix<Element, 2, 1> const & C, Matrix<Element, 2, 1> const & D) { return Matrix( A, B , C.at(0, 0), D.at(0, 0) , C.at(1, 0), D.at(1, 0) ); } /// Forms a 3-by-2 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 2, 1> const & A, Matrix<Element, 2, 1> const & B, Element C, Element D) { return Matrix( A.at(0, 0), B.at(0, 0) , A.at(1, 0), B.at(1, 0) , C, D ); } /// Elementwise add operator (3-by-2) CUTLASS_HOST_DEVICE Matrix add(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] + rhs.data[0]; result.data[1] = data[1] + rhs.data[1]; result.data[2] = data[2] + rhs.data[2]; result.data[3] = data[3] + rhs.data[3]; result.data[4] = data[4] + rhs.data[4]; result.data[5] = data[5] + rhs.data[5]; return result; } /// Elementwise add operator (3-by-2) CUTLASS_HOST_DEVICE Matrix operator +(Matrix const &rhs) const { return add(rhs); } /// Elementwise add operator (3-by-2) CUTLASS_HOST_DEVICE Matrix & operator +=(Matrix const &rhs) { data[0] += rhs.data[0]; data[1] += rhs.data[1]; data[2] += rhs.data[2]; data[3] += rhs.data[3]; data[4] += rhs.data[4]; data[5] += rhs.data[5]; return *this; } /// Elementwise subtract operator (3-by-2) CUTLASS_HOST_DEVICE Matrix subtract(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] - rhs.data[0]; result.data[1] = data[1] - rhs.data[1]; result.data[2] = data[2] - rhs.data[2]; result.data[3] = data[3] - rhs.data[3]; result.data[4] = data[4] - rhs.data[4]; result.data[5] = data[5] - rhs.data[5]; return result; } /// Elementwise subtract operator (3-by-2) CUTLASS_HOST_DEVICE Matrix operator -(Matrix const &rhs) const { return subtract(rhs); } /// Elementwise subtract operator (3-by-2) CUTLASS_HOST_DEVICE Matrix & operator -=(Matrix const &rhs) { data[0] -= rhs.data[0]; data[1] -= rhs.data[1]; data[2] -= rhs.data[2]; data[3] -= rhs.data[3]; data[4] -= rhs.data[4]; data[5] -= rhs.data[5]; return *this; } /// Elementwise multiply operator (3-by-2) CUTLASS_HOST_DEVICE Matrix multiply(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] * rhs.data[0]; result.data[1] = data[1] * rhs.data[1]; result.data[2] = data[2] * rhs.data[2]; result.data[3] = data[3] * rhs.data[3]; result.data[4] = data[4] * rhs.data[4]; result.data[5] = data[5] * rhs.data[5]; return result; } /// Scalar multiply operator (3-by-2) 
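///
/// Example (illustrative only; assumes Element is float and that the 2-by-1
/// specialization provides a Matrix2x1 alias and make_Matrix2x1 helper
/// following the same pattern as the sizes defined in this file):
/// \code
/// cutlass::Matrix3x2<float> A = cutlass::Matrix3x2<float>::ones();
/// cutlass::Matrix3x2<float> B = A * 0.5f;        // every element becomes 0.5
/// cutlass::Matrix2x1<float> x = cutlass::make_Matrix2x1(1.0f, 1.0f);
/// cutlass::Matrix3x1<float> y = B * x;           // 3-by-1-by-2 product; each entry is 1
/// \endcode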
CUTLASS_HOST_DEVICE Matrix multiply(Element const &s) const { Matrix result; result.data[0] = data[0] * s; result.data[1] = data[1] * s; result.data[2] = data[2] * s; result.data[3] = data[3] * s; result.data[4] = data[4] * s; result.data[5] = data[5] * s; return result; } /// Scalar multiply operator (3-by-2) CUTLASS_HOST_DEVICE Matrix operator *(Element const &s) const { return multiply(s); } /// Scalar multiply operator (3-by-2) CUTLASS_HOST_DEVICE Matrix & operator *=(Element const &s) { data[0] *= s; data[1] *= s; data[2] *= s; data[3] *= s; data[4] *= s; data[5] *= s; return *this; } /// Elementwise divide operator (3-by-2) CUTLASS_HOST_DEVICE Matrix divide(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] / rhs.data[0]; result.data[1] = data[1] / rhs.data[1]; result.data[2] = data[2] / rhs.data[2]; result.data[3] = data[3] / rhs.data[3]; result.data[4] = data[4] / rhs.data[4]; result.data[5] = data[5] / rhs.data[5]; return result; } /// Scalar divide operator (3-by-2) CUTLASS_HOST_DEVICE Matrix divide(Element const &s) const { Matrix result; result.data[0] = data[0] / s; result.data[1] = data[1] / s; result.data[2] = data[2] / s; result.data[3] = data[3] / s; result.data[4] = data[4] / s; result.data[5] = data[5] / s; return result; } /// Scalar divide operator (3-by-2) CUTLASS_HOST_DEVICE Matrix operator /(Element const &s) const { return divide(s); } /// Scalar divide operator (3-by-2) CUTLASS_HOST_DEVICE Matrix & operator /=(Element const &s) { data[0] /= s; data[1] /= s; data[2] /= s; data[3] /= s; data[4] /= s; data[5] /= s; return *this; } /// Elementwise divide operator (3-by-2) CUTLASS_HOST_DEVICE Matrix operator /(Matrix const &rhs) const { return divide(rhs); } /// Elementwise divide operator (3-by-2) CUTLASS_HOST_DEVICE Matrix & operator /=(Matrix const &rhs) { data[0] /= rhs.data[0]; data[1] /= rhs.data[1]; data[2] /= rhs.data[2]; data[3] /= rhs.data[3]; data[4] /= rhs.data[4]; data[5] /= rhs.data[5]; return *this; } /// Negates each element of the matrix CUTLASS_HOST_DEVICE Matrix operator-() const { Matrix m; m.data[0] = -m.data[0]; m.data[1] = -m.data[1]; m.data[2] = -m.data[2]; m.data[3] = -m.data[3]; m.data[4] = -m.data[4]; m.data[5] = -m.data[5]; return m; } /// Matrix product of size 3-by-1-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> product( Matrix<Element, 2, 1> const &rhs, Matrix<Element, 3, 1> accum = Matrix<Element, 3, 1>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[2] * rhs.data[0]; accum.data[2] += data[4] * rhs.data[0]; // k=1 accum.data[0] += data[1] * rhs.data[1]; accum.data[1] += data[3] * rhs.data[1]; accum.data[2] += data[5] * rhs.data[1]; return accum; } /// Matrix product of size 3-by-1-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> operator*(Matrix<Element, 2, 1> const &rhs) const { return product(rhs); } /// Matrix product of size 3-by-2-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 3, 2> product( Matrix<Element, 2, 2> const &rhs, Matrix<Element, 3, 2> accum = Matrix<Element, 3, 2>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[2] * rhs.data[0]; accum.data[3] += data[2] * rhs.data[1]; accum.data[4] += data[4] * rhs.data[0]; accum.data[5] += data[4] * rhs.data[1]; // k=1 accum.data[0] += data[1] * rhs.data[2]; accum.data[1] += data[1] * rhs.data[3]; accum.data[2] += data[3] * rhs.data[2]; accum.data[3] += data[3] * rhs.data[3]; accum.data[4] += data[5] * rhs.data[2]; accum.data[5] += data[5] * rhs.data[3]; return accum; } /// 
Matrix product of size 3-by-2-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 3, 2> operator*(Matrix<Element, 2, 2> const &rhs) const { return product(rhs); } /// Matrix product of size 3-by-2-by-2 CUTLASS_HOST_DEVICE Matrix & operator*=(Matrix<Element, 2, 2> const &rhs) { *this = product(rhs); return *this; } /// Matrix product of size 3-by-3-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 3, 3> product( Matrix<Element, 2, 3> const &rhs, Matrix<Element, 3, 3> accum = Matrix<Element, 3, 3>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[2] * rhs.data[0]; accum.data[4] += data[2] * rhs.data[1]; accum.data[5] += data[2] * rhs.data[2]; accum.data[6] += data[4] * rhs.data[0]; accum.data[7] += data[4] * rhs.data[1]; accum.data[8] += data[4] * rhs.data[2]; // k=1 accum.data[0] += data[1] * rhs.data[3]; accum.data[1] += data[1] * rhs.data[4]; accum.data[2] += data[1] * rhs.data[5]; accum.data[3] += data[3] * rhs.data[3]; accum.data[4] += data[3] * rhs.data[4]; accum.data[5] += data[3] * rhs.data[5]; accum.data[6] += data[5] * rhs.data[3]; accum.data[7] += data[5] * rhs.data[4]; accum.data[8] += data[5] * rhs.data[5]; return accum; } /// Matrix product of size 3-by-3-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 3, 3> operator*(Matrix<Element, 2, 3> const &rhs) const { return product(rhs); } /// Matrix product of size 3-by-4-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 3, 4> product( Matrix<Element, 2, 4> const &rhs, Matrix<Element, 3, 4> accum = Matrix<Element, 3, 4>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[0] * rhs.data[3]; accum.data[4] += data[2] * rhs.data[0]; accum.data[5] += data[2] * rhs.data[1]; accum.data[6] += data[2] * rhs.data[2]; accum.data[7] += data[2] * rhs.data[3]; accum.data[8] += data[4] * rhs.data[0]; accum.data[9] += data[4] * rhs.data[1]; accum.data[10] += data[4] * rhs.data[2]; accum.data[11] += data[4] * rhs.data[3]; // k=1 accum.data[0] += data[1] * rhs.data[4]; accum.data[1] += data[1] * rhs.data[5]; accum.data[2] += data[1] * rhs.data[6]; accum.data[3] += data[1] * rhs.data[7]; accum.data[4] += data[3] * rhs.data[4]; accum.data[5] += data[3] * rhs.data[5]; accum.data[6] += data[3] * rhs.data[6]; accum.data[7] += data[3] * rhs.data[7]; accum.data[8] += data[5] * rhs.data[4]; accum.data[9] += data[5] * rhs.data[5]; accum.data[10] += data[5] * rhs.data[6]; accum.data[11] += data[5] * rhs.data[7]; return accum; } /// Matrix product of size 3-by-4-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 3, 4> operator*(Matrix<Element, 2, 4> const &rhs) const { return product(rhs); } /// Returns the sum of elements CUTLASS_HOST_DEVICE Element sum(Element accum = Element()) const { accum += data[0]; accum += data[1]; accum += data[2]; accum += data[3]; accum += data[4]; accum += data[5]; return accum; } /// Returns the sum of squared elements CUTLASS_HOST_DEVICE Element norm(Element accum = Element()) const { accum += data[0] * data[0]; accum += data[1] * data[1]; accum += data[2] * data[2]; accum += data[3] * data[3]; accum += data[4] * data[4]; accum += data[5] * data[5]; return accum; } /// Returns square root of the norm CUTLASS_HOST_DEVICE Element magnitude() const { return fast_sqrt(norm()); } /// Returns the sum of diagonal elements CUTLASS_HOST_DEVICE Element trace(Element accum = Element()) const { accum += data[0]; accum += data[3]; return accum; } }; /// Template alias for 
3-by-2 matrix template <typename Element> using Matrix3x2 = Matrix<Element, 3, 2>; /// Free funciton to infer element type from template arguments template <typename Element> CUTLASS_HOST_DEVICE Matrix3x2<Element> make_Matrix3x2( Element _0_0, Element _0_1, Element _1_0, Element _1_1, Element _2_0, Element _2_1 ) { return Matrix3x2<Element>( _0_0, _0_1, _1_0, _1_1, _2_0, _2_1 ); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// 3-by-3 matrix template class definition template <typename Element_> struct Matrix<Element_, 3, 3> { // // Type definitions // /// Element data type using Element = Element_; /// Number of rows in matrix static int const kRows = 3; /// Number of columns in matrix static int const kColumns = 3; /// Layout of matrix in underlying array using Layout = layout::RowMajor; /// Number of elements in matrix static int const kCount = 9; // // Data members // /// Elements of the matrix in row-major layout Array<Element, kCount> data; // // Methods // /// Constructs a zero matrix CUTLASS_HOST_DEVICE Matrix() { data.clear(); } /// Copy constructor for a 3-by-3 matrix CUTLASS_HOST_DEVICE Matrix(Matrix const &rhs) { data = rhs.data; } /// Constucts a 3-by-3 matrix from scalar elements CUTLASS_HOST_DEVICE Matrix( Element _0_0, Element _0_1, Element _0_2, Element _1_0, Element _1_1, Element _1_2, Element _2_0, Element _2_1, Element _2_2 ) { data[0] = _0_0; data[1] = _0_1; data[2] = _0_2; data[3] = _1_0; data[4] = _1_1; data[5] = _1_2; data[6] = _2_0; data[7] = _2_1; data[8] = _2_2; } /// Constucts a 3-by-3 matrix from row vectors CUTLASS_HOST_DEVICE Matrix( Matrix<Element, 1, 3> const &row_0, Matrix<Element, 1, 3> const &row_1, Matrix<Element, 1, 3> const &row_2 ) { data[0] = row_0.data[0]; data[1] = row_0.data[1]; data[2] = row_0.data[2]; data[3] = row_1.data[0]; data[4] = row_1.data[1]; data[5] = row_1.data[2]; data[6] = row_2.data[0]; data[7] = row_2.data[1]; data[8] = row_2.data[2]; } /// Static method to construct a 3-by-3 matrix from column vectors CUTLASS_HOST_DEVICE static Matrix from_columns( Matrix<Element, 3, 1> const &column_0, Matrix<Element, 3, 1> const &column_1, Matrix<Element, 3, 1> const &column_2 ) { Matrix result; result.data[0] = column_0.data[0]; result.data[1] = column_1.data[0]; result.data[2] = column_2.data[0]; result.data[3] = column_0.data[1]; result.data[4] = column_1.data[1]; result.data[5] = column_2.data[1]; result.data[6] = column_0.data[2]; result.data[7] = column_1.data[2]; result.data[8] = column_2.data[2]; return result; } /// Constructs an identity matrix CUTLASS_HOST_DEVICE static Matrix identity() { Matrix m; m.data[0] = Element(1); m.data[4] = Element(1); m.data[8] = Element(1); return m; } /// Constructs a matrix from a uniform element CUTLASS_HOST_DEVICE static Matrix uniform(Element s) { Matrix m; m.data[0] = s; m.data[1] = s; m.data[2] = s; m.data[3] = s; m.data[4] = s; m.data[5] = s; m.data[6] = s; m.data[7] = s; m.data[8] = s; return m; } /// Constructs a matrix from a uniform element 1 CUTLASS_HOST_DEVICE static Matrix ones() { return uniform(Element(1)); } /// Constructs a matrix from a uniform element 0 CUTLASS_HOST_DEVICE static Matrix zero() { return Matrix(); } /// Constructs a matrix from elements along its diagonal CUTLASS_HOST_DEVICE static Matrix from_diagonal(Matrix<Element, 3, 1> const &diag) { Matrix m; m.data[0] = diag.data[0]; m.data[4] = diag.data[1]; m.data[8] = diag.data[2]; return m; } /// Constructs a matrix from elements along its diagonal CUTLASS_HOST_DEVICE 
static Matrix from_diagonal(Matrix<Element, 1, 3> const &diag) { Matrix m; m.data[0] = diag.data[0]; m.data[4] = diag.data[1]; m.data[8] = diag.data[2]; return m; } /// Gets an array of diagonal elements CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> diagonal() const { Matrix<Element, 3, 1> diag; diag.data[0] = data[0]; diag.data[1] = data[4]; diag.data[2] = data[8]; return diag; } /// Returns a transposed matrix CUTLASS_HOST_DEVICE Matrix<Element, 3, 3> transpose() const { Matrix<Element, 3, 3> mt; mt.data[0] = data[0]; mt.data[3] = data[1]; mt.data[6] = data[2]; mt.data[1] = data[3]; mt.data[4] = data[4]; mt.data[7] = data[5]; mt.data[2] = data[6]; mt.data[5] = data[7]; mt.data[8] = data[8]; return mt; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(int i, int j) const { return data[i * 3 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(int i, int j) { return data[i * 3 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element &at(int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element at(int offset) const { return data[offset]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element operator[](Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & operator[](Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element & operator[](int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element operator[](int offset) const { return data[offset]; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const { Matrix<Element, 1, 2> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 1]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 1] = m.data[1]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const { Matrix<Element, 1, 3> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 1]; m.data[2] = data[i * 3 + j + 2]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 1] = m.data[1]; data[i * 3 + j + 2] = m.data[2]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 1, 3> row(int i) const { return slice_1x3(i, 0); } CUTLASS_HOST_DEVICE Matrix &set_row(Matrix<Element, 1, 3> const &v, int i = 0) { return set_slice_1x3(v, i, 0); } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const { Matrix<Element, 2, 1> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 3]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 3] = m.data[1]; return *this; } 
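
  /*
    Example (illustrative only; assumes Element is float and that this header
    is included as <cutlass/matrix.h>) combining the 3-by-3 utilities defined
    in this class:

      cutlass::Matrix3x3<float> R = cutlass::Matrix3x3<float>::rotation_Z(0.5f);
      cutlass::Matrix3x1<float> v = cutlass::make_Matrix3x1(1.0f, 0.0f, 0.0f);
      cutlass::Matrix3x1<float> w = R * v;             // 3-by-1-by-3 product
      cutlass::Matrix3x3<float> I = R * R.inverse();   // approximately the identity
  */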
/// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const { Matrix<Element, 2, 2> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 1]; m.data[2] = data[i * 3 + j + 3]; m.data[3] = data[i * 3 + j + 4]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 1] = m.data[1]; data[i * 3 + j + 3] = m.data[2]; data[i * 3 + j + 4] = m.data[3]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 3> slice_2x3(int i = 0, int j = 0) const { Matrix<Element, 2, 3> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 1]; m.data[2] = data[i * 3 + j + 2]; m.data[3] = data[i * 3 + j + 3]; m.data[4] = data[i * 3 + j + 4]; m.data[5] = data[i * 3 + j + 5]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x3(Matrix<Element, 2, 3> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 1] = m.data[1]; data[i * 3 + j + 2] = m.data[2]; data[i * 3 + j + 3] = m.data[3]; data[i * 3 + j + 4] = m.data[4]; data[i * 3 + j + 5] = m.data[5]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const { Matrix<Element, 3, 1> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 3]; m.data[2] = data[i * 3 + j + 6]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 3] = m.data[1]; data[i * 3 + j + 6] = m.data[2]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> column(int j) const { return slice_3x1(0, j); } CUTLASS_HOST_DEVICE Matrix &set_column(Matrix<Element, 3, 1> const &v, int j =0) { return set_slice_3x1(v, 0, j); } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 3, 2> slice_3x2(int i = 0, int j = 0) const { Matrix<Element, 3, 2> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 1]; m.data[2] = data[i * 3 + j + 3]; m.data[3] = data[i * 3 + j + 4]; m.data[4] = data[i * 3 + j + 6]; m.data[5] = data[i * 3 + j + 7]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_3x2(Matrix<Element, 3, 2> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 1] = m.data[1]; data[i * 3 + j + 3] = m.data[2]; data[i * 3 + j + 4] = m.data[3]; data[i * 3 + j + 6] = m.data[4]; data[i * 3 + j + 7] = m.data[5]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 3, 3> slice_3x3(int i = 0, int j = 0) const { Matrix<Element, 3, 3> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 1]; m.data[2] = data[i * 3 + j + 2]; m.data[3] = data[i * 3 + j + 3]; m.data[4] = data[i * 3 + j + 4]; m.data[5] = data[i * 3 + j + 5]; m.data[6] = data[i * 3 + j + 6]; m.data[7] = data[i * 3 + j + 7]; m.data[8] = data[i * 3 + j + 8]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_3x3(Matrix<Element, 3, 3> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 1] = m.data[1]; data[i * 3 + j + 2] = m.data[2]; data[i * 3 + j + 3] = m.data[3]; data[i * 3 + j + 4] = m.data[4]; data[i * 3 + j + 5] = 
m.data[5]; data[i * 3 + j + 6] = m.data[6]; data[i * 3 + j + 7] = m.data[7]; data[i * 3 + j + 8] = m.data[8]; return *this; } /// Forms a 3-by-3 matrix by horizontally concatenating a 3-by-1 matrix with a 3-by-2 matrix CUTLASS_HOST_DEVICE static Matrix hcat(Matrix<Element, 3, 1> const & lhs, Matrix<Element, 3, 2> const & rhs) { return Matrix( lhs.at(0, 0), rhs.at(0, 0), rhs.at(0, 1) , lhs.at(1, 0), rhs.at(1, 0), rhs.at(1, 1) , lhs.at(2, 0), rhs.at(2, 0), rhs.at(2, 1)); } /// Forms a 3-by-3 matrix by horizontally concatenating a 3-by-2 matrix with a 3-by-1 matrix CUTLASS_HOST_DEVICE static Matrix hcat(Matrix<Element, 3, 2> const & lhs, Matrix<Element, 3, 1> const & rhs) { return Matrix( lhs.at(0, 0), lhs.at(0, 1), rhs.at(0, 0) , lhs.at(1, 0), lhs.at(1, 1), rhs.at(1, 0) , lhs.at(2, 0), lhs.at(2, 1), rhs.at(2, 0)); } /// Concatenates this matrix with a a 3-by-1 matrix to form a 3-by-4 matrix CUTLASS_HOST_DEVICE Matrix<Element, 3, 4> hcat(Matrix<Element, 3, 1> const & rhs) const { return Matrix<Element, 3, 4>::hcat(*this, rhs); } /// Forms a 3-by-3 matrix by vertically concatenating a 1-by-3 matrix with a 2-by-3 matrix CUTLASS_HOST_DEVICE static Matrix vcat(Matrix<Element, 1, 3> const & upper, Matrix<Element, 2, 3> const & lower) { return Matrix( upper.at(0, 0), upper.at(0, 1), upper.at(0, 2) , lower.at(0, 0), lower.at(0, 1), lower.at(0, 2) , lower.at(1, 0), lower.at(1, 1), lower.at(1, 2)); } /// Forms a 3-by-3 matrix by vertically concatenating a 2-by-3 matrix with a 1-by-3 matrix CUTLASS_HOST_DEVICE static Matrix vcat(Matrix<Element, 2, 3> const & upper, Matrix<Element, 1, 3> const & lower) { return Matrix( upper.at(0, 0), upper.at(0, 1), upper.at(0, 2) , upper.at(1, 0), upper.at(1, 1), upper.at(1, 2) , lower.at(0, 0), lower.at(0, 1), lower.at(0, 2)); } /// Concatenates this matrix with a a 1-by-3 matrix to form a 4-by-3 matrix CUTLASS_HOST_DEVICE Matrix<Element, 4, 3> vcat(Matrix<Element, 1, 3> const & rhs) const { return Matrix<Element, 4, 3>::vcat(*this, rhs); } /// Forms a 3-by-3 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Element A, Matrix<Element, 1, 2> const & B, Matrix<Element, 2, 1> const & C, Matrix<Element, 2, 2> const & D) { return Matrix( A, B.at(0, 0), B.at(0, 1) , C.at(0, 0), D.at(0, 0), D.at(0, 1) , C.at(1, 0), D.at(1, 0), D.at(1, 1) ); } /// Forms a 3-by-3 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 1, 2> const & A, Element B, Matrix<Element, 2, 2> const & C, Matrix<Element, 2, 1> const & D) { return Matrix( A.at(0, 0), A.at(0, 1), B , C.at(0, 0), C.at(0, 1), D.at(0, 0) , C.at(1, 0), C.at(1, 1), D.at(1, 0) ); } /// Forms a 3-by-3 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 2, 1> const & A, Matrix<Element, 2, 2> const & B, Element C, Matrix<Element, 1, 2> const & D) { return Matrix( A.at(0, 0), B.at(0, 0), B.at(0, 1) , A.at(1, 0), B.at(1, 0), B.at(1, 1) , C, D.at(0, 0), D.at(0, 1) ); } /// Forms a 3-by-3 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 2, 2> const & A, Matrix<Element, 2, 1> const & B, Matrix<Element, 1, 2> const & C, Element D) { return Matrix( A.at(0, 0), A.at(0, 1), B.at(0, 0) , A.at(1, 0), A.at(1, 1), B.at(1, 0) , C.at(0, 0), C.at(0, 1), D ); } /// Elementwise add operator (3-by-3) CUTLASS_HOST_DEVICE Matrix add(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] + rhs.data[0]; result.data[1] = data[1] + rhs.data[1]; result.data[2] = data[2] + 
rhs.data[2]; result.data[3] = data[3] + rhs.data[3]; result.data[4] = data[4] + rhs.data[4]; result.data[5] = data[5] + rhs.data[5]; result.data[6] = data[6] + rhs.data[6]; result.data[7] = data[7] + rhs.data[7]; result.data[8] = data[8] + rhs.data[8]; return result; } /// Elementwise add operator (3-by-3) CUTLASS_HOST_DEVICE Matrix operator +(Matrix const &rhs) const { return add(rhs); } /// Elementwise add operator (3-by-3) CUTLASS_HOST_DEVICE Matrix & operator +=(Matrix const &rhs) { data[0] += rhs.data[0]; data[1] += rhs.data[1]; data[2] += rhs.data[2]; data[3] += rhs.data[3]; data[4] += rhs.data[4]; data[5] += rhs.data[5]; data[6] += rhs.data[6]; data[7] += rhs.data[7]; data[8] += rhs.data[8]; return *this; } /// Elementwise subtract operator (3-by-3) CUTLASS_HOST_DEVICE Matrix subtract(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] - rhs.data[0]; result.data[1] = data[1] - rhs.data[1]; result.data[2] = data[2] - rhs.data[2]; result.data[3] = data[3] - rhs.data[3]; result.data[4] = data[4] - rhs.data[4]; result.data[5] = data[5] - rhs.data[5]; result.data[6] = data[6] - rhs.data[6]; result.data[7] = data[7] - rhs.data[7]; result.data[8] = data[8] - rhs.data[8]; return result; } /// Elementwise subtract operator (3-by-3) CUTLASS_HOST_DEVICE Matrix operator -(Matrix const &rhs) const { return subtract(rhs); } /// Elementwise subtract operator (3-by-3) CUTLASS_HOST_DEVICE Matrix & operator -=(Matrix const &rhs) { data[0] -= rhs.data[0]; data[1] -= rhs.data[1]; data[2] -= rhs.data[2]; data[3] -= rhs.data[3]; data[4] -= rhs.data[4]; data[5] -= rhs.data[5]; data[6] -= rhs.data[6]; data[7] -= rhs.data[7]; data[8] -= rhs.data[8]; return *this; } /// Elementwise multiply operator (3-by-3) CUTLASS_HOST_DEVICE Matrix multiply(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] * rhs.data[0]; result.data[1] = data[1] * rhs.data[1]; result.data[2] = data[2] * rhs.data[2]; result.data[3] = data[3] * rhs.data[3]; result.data[4] = data[4] * rhs.data[4]; result.data[5] = data[5] * rhs.data[5]; result.data[6] = data[6] * rhs.data[6]; result.data[7] = data[7] * rhs.data[7]; result.data[8] = data[8] * rhs.data[8]; return result; } /// Scalar multiply operator (3-by-3) CUTLASS_HOST_DEVICE Matrix multiply(Element const &s) const { Matrix result; result.data[0] = data[0] * s; result.data[1] = data[1] * s; result.data[2] = data[2] * s; result.data[3] = data[3] * s; result.data[4] = data[4] * s; result.data[5] = data[5] * s; result.data[6] = data[6] * s; result.data[7] = data[7] * s; result.data[8] = data[8] * s; return result; } /// Scalar multiply operator (3-by-3) CUTLASS_HOST_DEVICE Matrix operator *(Element const &s) const { return multiply(s); } /// Scalar multiply operator (3-by-3) CUTLASS_HOST_DEVICE Matrix & operator *=(Element const &s) { data[0] *= s; data[1] *= s; data[2] *= s; data[3] *= s; data[4] *= s; data[5] *= s; data[6] *= s; data[7] *= s; data[8] *= s; return *this; } /// Elementwise divide operator (3-by-3) CUTLASS_HOST_DEVICE Matrix divide(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] / rhs.data[0]; result.data[1] = data[1] / rhs.data[1]; result.data[2] = data[2] / rhs.data[2]; result.data[3] = data[3] / rhs.data[3]; result.data[4] = data[4] / rhs.data[4]; result.data[5] = data[5] / rhs.data[5]; result.data[6] = data[6] / rhs.data[6]; result.data[7] = data[7] / rhs.data[7]; result.data[8] = data[8] / rhs.data[8]; return result; } /// Scalar divide operator (3-by-3) CUTLASS_HOST_DEVICE Matrix divide(Element const &s) const { Matrix 
result; result.data[0] = data[0] / s; result.data[1] = data[1] / s; result.data[2] = data[2] / s; result.data[3] = data[3] / s; result.data[4] = data[4] / s; result.data[5] = data[5] / s; result.data[6] = data[6] / s; result.data[7] = data[7] / s; result.data[8] = data[8] / s; return result; } /// Scalar divide operator (3-by-3) CUTLASS_HOST_DEVICE Matrix operator /(Element const &s) const { return divide(s); } /// Scalar divide operator (3-by-3) CUTLASS_HOST_DEVICE Matrix & operator /=(Element const &s) { data[0] /= s; data[1] /= s; data[2] /= s; data[3] /= s; data[4] /= s; data[5] /= s; data[6] /= s; data[7] /= s; data[8] /= s; return *this; } /// Elementwise divide operator (3-by-3) CUTLASS_HOST_DEVICE Matrix operator /(Matrix const &rhs) const { return divide(rhs); } /// Elementwise divide operator (3-by-3) CUTLASS_HOST_DEVICE Matrix & operator /=(Matrix const &rhs) { data[0] /= rhs.data[0]; data[1] /= rhs.data[1]; data[2] /= rhs.data[2]; data[3] /= rhs.data[3]; data[4] /= rhs.data[4]; data[5] /= rhs.data[5]; data[6] /= rhs.data[6]; data[7] /= rhs.data[7]; data[8] /= rhs.data[8]; return *this; } /// Negates each element of the matrix CUTLASS_HOST_DEVICE Matrix operator-() const { Matrix m; m.data[0] = -m.data[0]; m.data[1] = -m.data[1]; m.data[2] = -m.data[2]; m.data[3] = -m.data[3]; m.data[4] = -m.data[4]; m.data[5] = -m.data[5]; m.data[6] = -m.data[6]; m.data[7] = -m.data[7]; m.data[8] = -m.data[8]; return m; } /// Matrix product of size 3-by-1-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> product( Matrix<Element, 3, 1> const &rhs, Matrix<Element, 3, 1> accum = Matrix<Element, 3, 1>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[3] * rhs.data[0]; accum.data[2] += data[6] * rhs.data[0]; // k=1 accum.data[0] += data[1] * rhs.data[1]; accum.data[1] += data[4] * rhs.data[1]; accum.data[2] += data[7] * rhs.data[1]; // k=2 accum.data[0] += data[2] * rhs.data[2]; accum.data[1] += data[5] * rhs.data[2]; accum.data[2] += data[8] * rhs.data[2]; return accum; } /// Matrix product of size 3-by-1-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> operator*(Matrix<Element, 3, 1> const &rhs) const { return product(rhs); } /// Matrix product of size 3-by-2-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 3, 2> product( Matrix<Element, 3, 2> const &rhs, Matrix<Element, 3, 2> accum = Matrix<Element, 3, 2>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[3] * rhs.data[0]; accum.data[3] += data[3] * rhs.data[1]; accum.data[4] += data[6] * rhs.data[0]; accum.data[5] += data[6] * rhs.data[1]; // k=1 accum.data[0] += data[1] * rhs.data[2]; accum.data[1] += data[1] * rhs.data[3]; accum.data[2] += data[4] * rhs.data[2]; accum.data[3] += data[4] * rhs.data[3]; accum.data[4] += data[7] * rhs.data[2]; accum.data[5] += data[7] * rhs.data[3]; // k=2 accum.data[0] += data[2] * rhs.data[4]; accum.data[1] += data[2] * rhs.data[5]; accum.data[2] += data[5] * rhs.data[4]; accum.data[3] += data[5] * rhs.data[5]; accum.data[4] += data[8] * rhs.data[4]; accum.data[5] += data[8] * rhs.data[5]; return accum; } /// Matrix product of size 3-by-2-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 3, 2> operator*(Matrix<Element, 3, 2> const &rhs) const { return product(rhs); } /// Matrix product of size 3-by-3-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 3, 3> product( Matrix<Element, 3, 3> const &rhs, Matrix<Element, 3, 3> accum = Matrix<Element, 3, 3>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * 
rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[3] * rhs.data[0]; accum.data[4] += data[3] * rhs.data[1]; accum.data[5] += data[3] * rhs.data[2]; accum.data[6] += data[6] * rhs.data[0]; accum.data[7] += data[6] * rhs.data[1]; accum.data[8] += data[6] * rhs.data[2]; // k=1 accum.data[0] += data[1] * rhs.data[3]; accum.data[1] += data[1] * rhs.data[4]; accum.data[2] += data[1] * rhs.data[5]; accum.data[3] += data[4] * rhs.data[3]; accum.data[4] += data[4] * rhs.data[4]; accum.data[5] += data[4] * rhs.data[5]; accum.data[6] += data[7] * rhs.data[3]; accum.data[7] += data[7] * rhs.data[4]; accum.data[8] += data[7] * rhs.data[5]; // k=2 accum.data[0] += data[2] * rhs.data[6]; accum.data[1] += data[2] * rhs.data[7]; accum.data[2] += data[2] * rhs.data[8]; accum.data[3] += data[5] * rhs.data[6]; accum.data[4] += data[5] * rhs.data[7]; accum.data[5] += data[5] * rhs.data[8]; accum.data[6] += data[8] * rhs.data[6]; accum.data[7] += data[8] * rhs.data[7]; accum.data[8] += data[8] * rhs.data[8]; return accum; } /// Matrix product of size 3-by-3-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 3, 3> operator*(Matrix<Element, 3, 3> const &rhs) const { return product(rhs); } /// Matrix product of size 3-by-3-by-3 CUTLASS_HOST_DEVICE Matrix & operator*=(Matrix<Element, 3, 3> const &rhs) { *this = product(rhs); return *this; } /// Matrix product of size 3-by-4-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 3, 4> product( Matrix<Element, 3, 4> const &rhs, Matrix<Element, 3, 4> accum = Matrix<Element, 3, 4>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[0] * rhs.data[3]; accum.data[4] += data[3] * rhs.data[0]; accum.data[5] += data[3] * rhs.data[1]; accum.data[6] += data[3] * rhs.data[2]; accum.data[7] += data[3] * rhs.data[3]; accum.data[8] += data[6] * rhs.data[0]; accum.data[9] += data[6] * rhs.data[1]; accum.data[10] += data[6] * rhs.data[2]; accum.data[11] += data[6] * rhs.data[3]; // k=1 accum.data[0] += data[1] * rhs.data[4]; accum.data[1] += data[1] * rhs.data[5]; accum.data[2] += data[1] * rhs.data[6]; accum.data[3] += data[1] * rhs.data[7]; accum.data[4] += data[4] * rhs.data[4]; accum.data[5] += data[4] * rhs.data[5]; accum.data[6] += data[4] * rhs.data[6]; accum.data[7] += data[4] * rhs.data[7]; accum.data[8] += data[7] * rhs.data[4]; accum.data[9] += data[7] * rhs.data[5]; accum.data[10] += data[7] * rhs.data[6]; accum.data[11] += data[7] * rhs.data[7]; // k=2 accum.data[0] += data[2] * rhs.data[8]; accum.data[1] += data[2] * rhs.data[9]; accum.data[2] += data[2] * rhs.data[10]; accum.data[3] += data[2] * rhs.data[11]; accum.data[4] += data[5] * rhs.data[8]; accum.data[5] += data[5] * rhs.data[9]; accum.data[6] += data[5] * rhs.data[10]; accum.data[7] += data[5] * rhs.data[11]; accum.data[8] += data[8] * rhs.data[8]; accum.data[9] += data[8] * rhs.data[9]; accum.data[10] += data[8] * rhs.data[10]; accum.data[11] += data[8] * rhs.data[11]; return accum; } /// Matrix product of size 3-by-4-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 3, 4> operator*(Matrix<Element, 3, 4> const &rhs) const { return product(rhs); } /// Returns the sum of elements CUTLASS_HOST_DEVICE Element sum(Element accum = Element()) const { accum += data[0]; accum += data[1]; accum += data[2]; accum += data[3]; accum += data[4]; accum += data[5]; accum += data[6]; accum += data[7]; accum += data[8]; return accum; } /// Returns the sum of squared elements CUTLASS_HOST_DEVICE Element norm(Element accum = 
  Element()) const {
    accum += data[0] * data[0];  accum += data[1] * data[1];  accum += data[2] * data[2];
    accum += data[3] * data[3];  accum += data[4] * data[4];  accum += data[5] * data[5];
    accum += data[6] * data[6];  accum += data[7] * data[7];  accum += data[8] * data[8];
    return accum;
  }

  /// Returns square root of the norm
  CUTLASS_HOST_DEVICE
  Element magnitude() const { return fast_sqrt(norm()); }

  /// Returns the sum of diagonal elements
  CUTLASS_HOST_DEVICE
  Element trace(Element accum = Element()) const {
    accum += data[0];  accum += data[4];  accum += data[8];
    return accum;
  }

  /// Returns 3-by-3 rotation matrix around the X axis
  CUTLASS_HOST_DEVICE
  static Matrix rotation_X(Element theta) {
    Matrix m = identity();

    Element c = fast_cos(theta);
    Element s = fast_sin(theta);

    m.at(1, 1) = c;
    m.at(1, 2) = -s;
    m.at(2, 1) = s;
    m.at(2, 2) = c;

    return m;
  }

  /// Returns 3-by-3 rotation matrix around the Y axis
  CUTLASS_HOST_DEVICE
  static Matrix rotation_Y(Element theta) {
    Matrix m = identity();

    Element c = fast_cos(theta);
    Element s = fast_sin(theta);

    m.at(0, 0) = c;
    m.at(2, 0) = -s;
    m.at(0, 2) = s;
    m.at(2, 2) = c;

    return m;
  }

  /// Returns 3-by-3 rotation matrix around the Z axis
  CUTLASS_HOST_DEVICE
  static Matrix rotation_Z(Element theta) {
    Matrix m = Matrix::identity();

    Element c = fast_cos(theta);
    Element s = fast_sin(theta);

    m.at(0, 0) = c;
    m.at(0, 1) = -s;
    m.at(1, 0) = s;
    m.at(1, 1) = c;

    return m;
  }

  /// Returns a 3-by-3 rotation matrix around a unit-length axis
  CUTLASS_HOST_DEVICE
  static Matrix rotation(Element theta, Matrix<Element, 3, 1> const &u) {
    Element x = u.data[0];
    Element y = u.data[1];
    Element z = u.data[2];

    Element c = fast_cos(theta);
    Element s = fast_sin(theta);
    Element one_minus_cos = Element(1) - fast_cos(theta);

    Matrix m;
    m.set_slice_3x3({
      c + x * x * one_minus_cos,     x * y * one_minus_cos - z * s, x * z * one_minus_cos + y * s,
      y * x * one_minus_cos + z * s, c + y * y * one_minus_cos,     y * z * one_minus_cos - x * s,
      z * x * one_minus_cos - y * s, z * y * one_minus_cos + x * s, c + z * z * one_minus_cos
    });
    return m;
  }

  /// Returns a 3-by-3 reflection about the plane specified by the
  /// unit-length normal vector n_unit
  CUTLASS_HOST_DEVICE
  static Matrix reflection(Matrix<Element, 3, 1> const &n_unit) {
    Element a = n_unit.data[0];
    Element b = n_unit.data[1];
    Element c = n_unit.data[2];

    Matrix m = Matrix::identity();
    m.set_slice_3x3({
      Element(1) - Element(2) * a * a, Element(-2) * a * b,             Element(-2) * a * c,
      Element(-2) * a * b,             Element(1) - Element(2) * b * b, Element(-2) * b * c,
      Element(-2) * a * c,             Element(-2) * b * c,             Element(1) - Element(2) * c * c
    });
    return m;
  }

  /// Computes the determinant of a 3-by-3 matrix
  CUTLASS_HOST_DEVICE
  Element determinant(Element accum = Element()) const {
    accum += at(0, 0) * Matrix<Element, 2, 2>({ at(1, 1), at(1, 2), at(2, 1), at(2, 2) }).determinant();
    accum -= at(0, 1) * Matrix<Element, 2, 2>({ at(1, 0), at(1, 2), at(2, 0), at(2, 2) }).determinant();
    accum += at(0, 2) * Matrix<Element, 2, 2>({ at(1, 0), at(1, 1), at(2, 0), at(2, 1) }).determinant();
    return accum;
  }

  /// Computes the inverse of a 3-by-3 matrix given
  /// the matrix's determinant
  CUTLASS_HOST_DEVICE
  Matrix inverse(Element det) const {
    return Matrix(
      at(1, 1) * at(2, 2) - at(1, 2) * at(2, 1), at(0, 2) * at(2, 1) - at(0, 1) * at(2, 2), at(0, 1) * at(1, 2) - at(0, 2) * at(1, 1),
      at(1, 2) * at(2, 0) - at(1, 0) * at(2, 2), at(0, 0) * at(2, 2) - at(0, 2) * at(2, 0), at(0, 2) * at(1, 0) - at(0, 0) * at(1, 2),
      at(1, 0) * at(2, 1) - at(1, 1) * at(2, 0), at(0, 1) * at(2, 0) - at(0, 0) * at(2, 1), at(0, 0)
* at(1, 1) - at(0, 1) * at(1, 0) ) * (Element(1) / det); } /// Computes the inverse of a 3-by-3 matrix CUTLASS_HOST_DEVICE Matrix inverse() const { return inverse(determinant()); } }; /// Template alias for 3-by-3 matrix template <typename Element> using Matrix3x3 = Matrix<Element, 3, 3>; /// Free function to infer element type from template arguments template <typename Element> CUTLASS_HOST_DEVICE Matrix3x3<Element> make_Matrix3x3( Element _0_0, Element _0_1, Element _0_2, Element _1_0, Element _1_1, Element _1_2, Element _2_0, Element _2_1, Element _2_2 ) { return Matrix3x3<Element>( _0_0, _0_1, _0_2, _1_0, _1_1, _1_2, _2_0, _2_1, _2_2 ); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// 3-by-4 matrix template class definition template <typename Element_> struct Matrix<Element_, 3, 4> { // // Type definitions // /// Element data type using Element = Element_; /// Number of rows in matrix static int const kRows = 3; /// Number of columns in matrix static int const kColumns = 4; /// Layout of matrix in underlying array using Layout = layout::RowMajor; /// Number of elements in matrix static int const kCount = 12; // // Data members // /// Elements of the matrix in row-major layout Array<Element, kCount> data; // // Methods // /// Constructs a zero matrix CUTLASS_HOST_DEVICE Matrix() { data.clear(); } /// Copy constructor for a 3-by-4 matrix CUTLASS_HOST_DEVICE Matrix(Matrix const &rhs) { data = rhs.data; } /// Constructs a 3-by-4 matrix from scalar elements CUTLASS_HOST_DEVICE Matrix( Element _0_0, Element _0_1, Element _0_2, Element _0_3, Element _1_0, Element _1_1, Element _1_2, Element _1_3, Element _2_0, Element _2_1, Element _2_2, Element _2_3 ) { data[0] = _0_0; data[1] = _0_1; data[2] = _0_2; data[3] = _0_3; data[4] = _1_0; data[5] = _1_1; data[6] = _1_2; data[7] = _1_3; data[8] = _2_0; data[9] = _2_1; data[10] = _2_2; data[11] = _2_3; } /// Constructs a 3-by-4 matrix from row vectors CUTLASS_HOST_DEVICE Matrix( Matrix<Element, 1, 4> const &row_0, Matrix<Element, 1, 4> const &row_1, Matrix<Element, 1, 4> const &row_2 ) { data[0] = row_0.data[0]; data[1] = row_0.data[1]; data[2] = row_0.data[2]; data[3] = row_0.data[3]; data[4] = row_1.data[0]; data[5] = row_1.data[1]; data[6] = row_1.data[2]; data[7] = row_1.data[3]; data[8] = row_2.data[0]; data[9] = row_2.data[1]; data[10] = row_2.data[2]; data[11] = row_2.data[3]; } /// Static method to construct a 3-by-4 matrix from column vectors CUTLASS_HOST_DEVICE static Matrix from_columns( Matrix<Element, 3, 1> const &column_0, Matrix<Element, 3, 1> const &column_1, Matrix<Element, 3, 1> const &column_2, Matrix<Element, 3, 1> const &column_3 ) { Matrix result; result.data[0] = column_0.data[0]; result.data[1] = column_1.data[0]; result.data[2] = column_2.data[0]; result.data[3] = column_3.data[0]; result.data[4] = column_0.data[1]; result.data[5] = column_1.data[1]; result.data[6] = column_2.data[1]; result.data[7] = column_3.data[1]; result.data[8] = column_0.data[2]; result.data[9] = column_1.data[2]; result.data[10] = column_2.data[2]; result.data[11] = column_3.data[2]; return result; } /// Constructs a matrix from a uniform element CUTLASS_HOST_DEVICE static Matrix uniform(Element s) { Matrix m; m.data[0] = s; m.data[1] = s; m.data[2] = s; m.data[3] = s; m.data[4] = s; m.data[5] = s; m.data[6] = s; m.data[7] = s; m.data[8] = s; m.data[9] = s; m.data[10] = s; m.data[11] = s; return m; } /// Constructs a matrix from a uniform element 1 CUTLASS_HOST_DEVICE static Matrix ones() { return 
uniform(Element(1)); } /// Constructs a matrix from a uniform element 0 CUTLASS_HOST_DEVICE static Matrix zero() { return Matrix(); } /// Constructs a matrix from elements along its diagonal CUTLASS_HOST_DEVICE static Matrix from_diagonal(Matrix<Element, 3, 1> const &diag) { Matrix m; m.data[0] = diag.data[0]; m.data[5] = diag.data[1]; m.data[10] = diag.data[2]; return m; } /// Constructs a matrix from elements along its diagonal CUTLASS_HOST_DEVICE static Matrix from_diagonal(Matrix<Element, 1, 3> const &diag) { Matrix m; m.data[0] = diag.data[0]; m.data[5] = diag.data[1]; m.data[10] = diag.data[2]; return m; } /// Gets an array of diagonal elements CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> diagonal() const { Matrix<Element, 3, 1> diag; diag.data[0] = data[0]; diag.data[1] = data[5]; diag.data[2] = data[10]; return diag; } /// Returns a transposed matrix CUTLASS_HOST_DEVICE Matrix<Element, 4, 3> transpose() const { Matrix<Element, 4, 3> mt; mt.data[0] = data[0]; mt.data[3] = data[1]; mt.data[6] = data[2]; mt.data[9] = data[3]; mt.data[1] = data[4]; mt.data[4] = data[5]; mt.data[7] = data[6]; mt.data[10] = data[7]; mt.data[2] = data[8]; mt.data[5] = data[9]; mt.data[8] = data[10]; mt.data[11] = data[11]; return mt; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(int i, int j) const { return data[i * 4 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(int i, int j) { return data[i * 4 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element &at(int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element at(int offset) const { return data[offset]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element operator[](Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & operator[](Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element & operator[](int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element operator[](int offset) const { return data[offset]; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const { Matrix<Element, 1, 2> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const { Matrix<Element, 1, 3> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 2]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 2] = m.data[2]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 1, 4> slice_1x4(int i = 0, int j = 0) const { 
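// Indexing note for the slice_MxN / set_slice_MxN helpers in this class: the 3-by-4 matrix is
// stored row-major, so element (i, j) lives at data[i * 4 + j] and consecutive rows of a slice
// are kColumns = 4 entries apart in data[].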
Matrix<Element, 1, 4> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 2]; m.data[3] = data[i * 4 + j + 3]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x4(Matrix<Element, 1, 4> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 2] = m.data[2]; data[i * 4 + j + 3] = m.data[3]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 1, 4> row(int i) const { return slice_1x4(i, 0); } CUTLASS_HOST_DEVICE Matrix &set_row(Matrix<Element, 1, 4> const &v, int i = 0) { return set_slice_1x4(v, i, 0); } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const { Matrix<Element, 2, 1> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 4]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 4] = m.data[1]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const { Matrix<Element, 2, 2> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 4]; m.data[3] = data[i * 4 + j + 5]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 4] = m.data[2]; data[i * 4 + j + 5] = m.data[3]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 3> slice_2x3(int i = 0, int j = 0) const { Matrix<Element, 2, 3> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 2]; m.data[3] = data[i * 4 + j + 4]; m.data[4] = data[i * 4 + j + 5]; m.data[5] = data[i * 4 + j + 6]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x3(Matrix<Element, 2, 3> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 2] = m.data[2]; data[i * 4 + j + 4] = m.data[3]; data[i * 4 + j + 5] = m.data[4]; data[i * 4 + j + 6] = m.data[5]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 4> slice_2x4(int i = 0, int j = 0) const { Matrix<Element, 2, 4> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 2]; m.data[3] = data[i * 4 + j + 3]; m.data[4] = data[i * 4 + j + 4]; m.data[5] = data[i * 4 + j + 5]; m.data[6] = data[i * 4 + j + 6]; m.data[7] = data[i * 4 + j + 7]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x4(Matrix<Element, 2, 4> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 2] = m.data[2]; data[i * 4 + j + 3] = m.data[3]; data[i * 4 + j + 4] = m.data[4]; data[i * 4 + j + 5] = m.data[5]; data[i * 4 + j + 6] = m.data[6]; data[i * 4 + j + 7] = m.data[7]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const { Matrix<Element, 3, 1> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 4]; m.data[2] = data[i * 4 + j + 8]; return m; } /// 
Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 4] = m.data[1]; data[i * 4 + j + 8] = m.data[2]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> column(int j) const { return slice_3x1(0, j); } CUTLASS_HOST_DEVICE Matrix &set_column(Matrix<Element, 3, 1> const &v, int j =0) { return set_slice_3x1(v, 0, j); } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 3, 2> slice_3x2(int i = 0, int j = 0) const { Matrix<Element, 3, 2> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 4]; m.data[3] = data[i * 4 + j + 5]; m.data[4] = data[i * 4 + j + 8]; m.data[5] = data[i * 4 + j + 9]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_3x2(Matrix<Element, 3, 2> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 4] = m.data[2]; data[i * 4 + j + 5] = m.data[3]; data[i * 4 + j + 8] = m.data[4]; data[i * 4 + j + 9] = m.data[5]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 3, 3> slice_3x3(int i = 0, int j = 0) const { Matrix<Element, 3, 3> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 2]; m.data[3] = data[i * 4 + j + 4]; m.data[4] = data[i * 4 + j + 5]; m.data[5] = data[i * 4 + j + 6]; m.data[6] = data[i * 4 + j + 8]; m.data[7] = data[i * 4 + j + 9]; m.data[8] = data[i * 4 + j + 10]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_3x3(Matrix<Element, 3, 3> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 2] = m.data[2]; data[i * 4 + j + 4] = m.data[3]; data[i * 4 + j + 5] = m.data[4]; data[i * 4 + j + 6] = m.data[5]; data[i * 4 + j + 8] = m.data[6]; data[i * 4 + j + 9] = m.data[7]; data[i * 4 + j + 10] = m.data[8]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 3, 4> slice_3x4(int i = 0, int j = 0) const { Matrix<Element, 3, 4> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 2]; m.data[3] = data[i * 4 + j + 3]; m.data[4] = data[i * 4 + j + 4]; m.data[5] = data[i * 4 + j + 5]; m.data[6] = data[i * 4 + j + 6]; m.data[7] = data[i * 4 + j + 7]; m.data[8] = data[i * 4 + j + 8]; m.data[9] = data[i * 4 + j + 9]; m.data[10] = data[i * 4 + j + 10]; m.data[11] = data[i * 4 + j + 11]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_3x4(Matrix<Element, 3, 4> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 2] = m.data[2]; data[i * 4 + j + 3] = m.data[3]; data[i * 4 + j + 4] = m.data[4]; data[i * 4 + j + 5] = m.data[5]; data[i * 4 + j + 6] = m.data[6]; data[i * 4 + j + 7] = m.data[7]; data[i * 4 + j + 8] = m.data[8]; data[i * 4 + j + 9] = m.data[9]; data[i * 4 + j + 10] = m.data[10]; data[i * 4 + j + 11] = m.data[11]; return *this; } /// Forms a 3-by-4 matrix by horizontally concatenating a 3-by-1 matrix with a 3-by-3 matrix CUTLASS_HOST_DEVICE static Matrix hcat(Matrix<Element, 3, 1> const & lhs, Matrix<Element, 3, 3> const & rhs) { return Matrix( lhs.at(0, 0), rhs.at(0, 0), rhs.at(0, 1), rhs.at(0, 2) , lhs.at(1, 0), rhs.at(1, 0), rhs.at(1, 1), 
rhs.at(1, 2) , lhs.at(2, 0), rhs.at(2, 0), rhs.at(2, 1), rhs.at(2, 2)); } /// Forms a 3-by-4 matrix by horizontally concatenating a 3-by-2 matrix with a 3-by-2 matrix CUTLASS_HOST_DEVICE static Matrix hcat(Matrix<Element, 3, 2> const & lhs, Matrix<Element, 3, 2> const & rhs) { return Matrix( lhs.at(0, 0), lhs.at(0, 1), rhs.at(0, 0), rhs.at(0, 1) , lhs.at(1, 0), lhs.at(1, 1), rhs.at(1, 0), rhs.at(1, 1) , lhs.at(2, 0), lhs.at(2, 1), rhs.at(2, 0), rhs.at(2, 1)); } /// Forms a 3-by-4 matrix by horizontally concatenating a 3-by-3 matrix with a 3-by-1 matrix CUTLASS_HOST_DEVICE static Matrix hcat(Matrix<Element, 3, 3> const & lhs, Matrix<Element, 3, 1> const & rhs) { return Matrix( lhs.at(0, 0), lhs.at(0, 1), lhs.at(0, 2), rhs.at(0, 0) , lhs.at(1, 0), lhs.at(1, 1), lhs.at(1, 2), rhs.at(1, 0) , lhs.at(2, 0), lhs.at(2, 1), lhs.at(2, 2), rhs.at(2, 0)); } /// Forms a 3-by-4 matrix by vertically concatenating a 1-by-4 matrix with a 2-by-4 matrix CUTLASS_HOST_DEVICE static Matrix vcat(Matrix<Element, 1, 4> const & upper, Matrix<Element, 2, 4> const & lower) { return Matrix( upper.at(0, 0), upper.at(0, 1), upper.at(0, 2), upper.at(0, 3) , lower.at(0, 0), lower.at(0, 1), lower.at(0, 2), lower.at(0, 3) , lower.at(1, 0), lower.at(1, 1), lower.at(1, 2), lower.at(1, 3)); } /// Forms a 3-by-4 matrix by vertically concatenating a 2-by-4 matrix with a 1-by-4 matrix CUTLASS_HOST_DEVICE static Matrix vcat(Matrix<Element, 2, 4> const & upper, Matrix<Element, 1, 4> const & lower) { return Matrix( upper.at(0, 0), upper.at(0, 1), upper.at(0, 2), upper.at(0, 3) , upper.at(1, 0), upper.at(1, 1), upper.at(1, 2), upper.at(1, 3) , lower.at(0, 0), lower.at(0, 1), lower.at(0, 2), lower.at(0, 3)); } /// Concatenates this matrix with a 1-by-4 matrix to form a 4-by-4 matrix CUTLASS_HOST_DEVICE Matrix<Element, 4, 4> vcat(Matrix<Element, 1, 4> const & rhs) const { return Matrix<Element, 4, 4>::vcat(*this, rhs); } /// Forms a 3-by-4 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Element A, Matrix<Element, 1, 3> const & B, Matrix<Element, 2, 1> const & C, Matrix<Element, 2, 3> const & D) { return Matrix( A, B.at(0, 0), B.at(0, 1), B.at(0, 2) , C.at(0, 0), D.at(0, 0), D.at(0, 1), D.at(0, 2) , C.at(1, 0), D.at(1, 0), D.at(1, 1), D.at(1, 2) ); } /// Forms a 3-by-4 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 1, 2> const & A, Matrix<Element, 1, 2> const & B, Matrix<Element, 2, 2> const & C, Matrix<Element, 2, 2> const & D) { return Matrix( A.at(0, 0), A.at(0, 1), B.at(0, 0), B.at(0, 1) , C.at(0, 0), C.at(0, 1), D.at(0, 0), D.at(0, 1) , C.at(1, 0), C.at(1, 1), D.at(1, 0), D.at(1, 1) ); } /// Forms a 3-by-4 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 1, 3> const & A, Element B, Matrix<Element, 2, 3> const & C, Matrix<Element, 2, 1> const & D) { return Matrix( A.at(0, 0), A.at(0, 1), A.at(0, 2), B , C.at(0, 0), C.at(0, 1), C.at(0, 2), D.at(0, 0) , C.at(1, 0), C.at(1, 1), C.at(1, 2), D.at(1, 0) ); } /// Forms a 3-by-4 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 2, 1> const & A, Matrix<Element, 2, 3> const & B, Element C, Matrix<Element, 1, 3> const & D) { return Matrix( A.at(0, 0), B.at(0, 0), B.at(0, 1), B.at(0, 2) , A.at(1, 0), B.at(1, 0), B.at(1, 1), B.at(1, 2) , C, D.at(0, 0), D.at(0, 1), D.at(0, 2) ); } /// Forms a 3-by-4 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 2, 2> const & A, 
Matrix<Element, 2, 2> const & B, Matrix<Element, 1, 2> const & C, Matrix<Element, 1, 2> const & D) { return Matrix( A.at(0, 0), A.at(0, 1), B.at(0, 0), B.at(0, 1) , A.at(1, 0), A.at(1, 1), B.at(1, 0), B.at(1, 1) , C.at(0, 0), C.at(0, 1), D.at(0, 0), D.at(0, 1) ); } /// Forms a 3-by-4 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 2, 3> const & A, Matrix<Element, 2, 1> const & B, Matrix<Element, 1, 3> const & C, Element D) { return Matrix( A.at(0, 0), A.at(0, 1), A.at(0, 2), B.at(0, 0) , A.at(1, 0), A.at(1, 1), A.at(1, 2), B.at(1, 0) , C.at(0, 0), C.at(0, 1), C.at(0, 2), D ); } /// Elementwise add operator (3-by-4) CUTLASS_HOST_DEVICE Matrix add(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] + rhs.data[0]; result.data[1] = data[1] + rhs.data[1]; result.data[2] = data[2] + rhs.data[2]; result.data[3] = data[3] + rhs.data[3]; result.data[4] = data[4] + rhs.data[4]; result.data[5] = data[5] + rhs.data[5]; result.data[6] = data[6] + rhs.data[6]; result.data[7] = data[7] + rhs.data[7]; result.data[8] = data[8] + rhs.data[8]; result.data[9] = data[9] + rhs.data[9]; result.data[10] = data[10] + rhs.data[10]; result.data[11] = data[11] + rhs.data[11]; return result; } /// Elementwise add operator (3-by-4) CUTLASS_HOST_DEVICE Matrix operator +(Matrix const &rhs) const { return add(rhs); } /// Elementwise add operator (3-by-4) CUTLASS_HOST_DEVICE Matrix & operator +=(Matrix const &rhs) { data[0] += rhs.data[0]; data[1] += rhs.data[1]; data[2] += rhs.data[2]; data[3] += rhs.data[3]; data[4] += rhs.data[4]; data[5] += rhs.data[5]; data[6] += rhs.data[6]; data[7] += rhs.data[7]; data[8] += rhs.data[8]; data[9] += rhs.data[9]; data[10] += rhs.data[10]; data[11] += rhs.data[11]; return *this; } /// Elementwise subtract operator (3-by-4) CUTLASS_HOST_DEVICE Matrix subtract(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] - rhs.data[0]; result.data[1] = data[1] - rhs.data[1]; result.data[2] = data[2] - rhs.data[2]; result.data[3] = data[3] - rhs.data[3]; result.data[4] = data[4] - rhs.data[4]; result.data[5] = data[5] - rhs.data[5]; result.data[6] = data[6] - rhs.data[6]; result.data[7] = data[7] - rhs.data[7]; result.data[8] = data[8] - rhs.data[8]; result.data[9] = data[9] - rhs.data[9]; result.data[10] = data[10] - rhs.data[10]; result.data[11] = data[11] - rhs.data[11]; return result; } /// Elementwise subtract operator (3-by-4) CUTLASS_HOST_DEVICE Matrix operator -(Matrix const &rhs) const { return subtract(rhs); } /// Elementwise subtract operator (3-by-4) CUTLASS_HOST_DEVICE Matrix & operator -=(Matrix const &rhs) { data[0] -= rhs.data[0]; data[1] -= rhs.data[1]; data[2] -= rhs.data[2]; data[3] -= rhs.data[3]; data[4] -= rhs.data[4]; data[5] -= rhs.data[5]; data[6] -= rhs.data[6]; data[7] -= rhs.data[7]; data[8] -= rhs.data[8]; data[9] -= rhs.data[9]; data[10] -= rhs.data[10]; data[11] -= rhs.data[11]; return *this; } /// Elementwise multiply operator (3-by-4) CUTLASS_HOST_DEVICE Matrix multiply(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] * rhs.data[0]; result.data[1] = data[1] * rhs.data[1]; result.data[2] = data[2] * rhs.data[2]; result.data[3] = data[3] * rhs.data[3]; result.data[4] = data[4] * rhs.data[4]; result.data[5] = data[5] * rhs.data[5]; result.data[6] = data[6] * rhs.data[6]; result.data[7] = data[7] * rhs.data[7]; result.data[8] = data[8] * rhs.data[8]; result.data[9] = data[9] * rhs.data[9]; result.data[10] = data[10] * rhs.data[10]; result.data[11] = data[11] * rhs.data[11]; 
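// At this point result holds the elementwise (Hadamard) product of the two 3-by-4 operands.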
return result; } /// Scalar multiply operator (3-by-4) CUTLASS_HOST_DEVICE Matrix multiply(Element const &s) const { Matrix result; result.data[0] = data[0] * s; result.data[1] = data[1] * s; result.data[2] = data[2] * s; result.data[3] = data[3] * s; result.data[4] = data[4] * s; result.data[5] = data[5] * s; result.data[6] = data[6] * s; result.data[7] = data[7] * s; result.data[8] = data[8] * s; result.data[9] = data[9] * s; result.data[10] = data[10] * s; result.data[11] = data[11] * s; return result; } /// Scalar multiply operator (3-by-4) CUTLASS_HOST_DEVICE Matrix operator *(Element const &s) const { return multiply(s); } /// Scalar multiply operator (3-by-4) CUTLASS_HOST_DEVICE Matrix & operator *=(Element const &s) { data[0] *= s; data[1] *= s; data[2] *= s; data[3] *= s; data[4] *= s; data[5] *= s; data[6] *= s; data[7] *= s; data[8] *= s; data[9] *= s; data[10] *= s; data[11] *= s; return *this; } /// Elementwise divide operator (3-by-4) CUTLASS_HOST_DEVICE Matrix divide(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] / rhs.data[0]; result.data[1] = data[1] / rhs.data[1]; result.data[2] = data[2] / rhs.data[2]; result.data[3] = data[3] / rhs.data[3]; result.data[4] = data[4] / rhs.data[4]; result.data[5] = data[5] / rhs.data[5]; result.data[6] = data[6] / rhs.data[6]; result.data[7] = data[7] / rhs.data[7]; result.data[8] = data[8] / rhs.data[8]; result.data[9] = data[9] / rhs.data[9]; result.data[10] = data[10] / rhs.data[10]; result.data[11] = data[11] / rhs.data[11]; return result; } /// Scalar divide operator (3-by-4) CUTLASS_HOST_DEVICE Matrix divide(Element const &s) const { Matrix result; result.data[0] = data[0] / s; result.data[1] = data[1] / s; result.data[2] = data[2] / s; result.data[3] = data[3] / s; result.data[4] = data[4] / s; result.data[5] = data[5] / s; result.data[6] = data[6] / s; result.data[7] = data[7] / s; result.data[8] = data[8] / s; result.data[9] = data[9] / s; result.data[10] = data[10] / s; result.data[11] = data[11] / s; return result; } /// Scalar divide operator (3-by-4) CUTLASS_HOST_DEVICE Matrix operator /(Element const &s) const { return divide(s); } /// Scalar divide operator (3-by-4) CUTLASS_HOST_DEVICE Matrix & operator /=(Element const &s) { data[0] /= s; data[1] /= s; data[2] /= s; data[3] /= s; data[4] /= s; data[5] /= s; data[6] /= s; data[7] /= s; data[8] /= s; data[9] /= s; data[10] /= s; data[11] /= s; return *this; } /// Elementwise divide operator (3-by-4) CUTLASS_HOST_DEVICE Matrix operator /(Matrix const &rhs) const { return divide(rhs); } /// Elementwise divide operator (3-by-4) CUTLASS_HOST_DEVICE Matrix & operator /=(Matrix const &rhs) { data[0] /= rhs.data[0]; data[1] /= rhs.data[1]; data[2] /= rhs.data[2]; data[3] /= rhs.data[3]; data[4] /= rhs.data[4]; data[5] /= rhs.data[5]; data[6] /= rhs.data[6]; data[7] /= rhs.data[7]; data[8] /= rhs.data[8]; data[9] /= rhs.data[9]; data[10] /= rhs.data[10]; data[11] /= rhs.data[11]; return *this; } /// Negates each element of the matrix CUTLASS_HOST_DEVICE Matrix operator-() const { Matrix m; m.data[0] = -data[0]; m.data[1] = -data[1]; m.data[2] = -data[2]; m.data[3] = -data[3]; m.data[4] = -data[4]; m.data[5] = -data[5]; m.data[6] = -data[6]; m.data[7] = -data[7]; m.data[8] = -data[8]; m.data[9] = -data[9]; m.data[10] = -data[10]; m.data[11] = -data[11]; return m; } /// Matrix product of size 3-by-1-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> product( Matrix<Element, 4, 1> const &rhs, Matrix<Element, 3, 1> accum = Matrix<Element, 3, 1>() 
) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[4] * rhs.data[0]; accum.data[2] += data[8] * rhs.data[0]; // k=1 accum.data[0] += data[1] * rhs.data[1]; accum.data[1] += data[5] * rhs.data[1]; accum.data[2] += data[9] * rhs.data[1]; // k=2 accum.data[0] += data[2] * rhs.data[2]; accum.data[1] += data[6] * rhs.data[2]; accum.data[2] += data[10] * rhs.data[2]; // k=3 accum.data[0] += data[3] * rhs.data[3]; accum.data[1] += data[7] * rhs.data[3]; accum.data[2] += data[11] * rhs.data[3]; return accum; } /// Matrix product of size 3-by-1-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> operator*(Matrix<Element, 4, 1> const &rhs) const { return product(rhs); } /// Matrix product of size 3-by-2-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 3, 2> product( Matrix<Element, 4, 2> const &rhs, Matrix<Element, 3, 2> accum = Matrix<Element, 3, 2>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[4] * rhs.data[0]; accum.data[3] += data[4] * rhs.data[1]; accum.data[4] += data[8] * rhs.data[0]; accum.data[5] += data[8] * rhs.data[1]; // k=1 accum.data[0] += data[1] * rhs.data[2]; accum.data[1] += data[1] * rhs.data[3]; accum.data[2] += data[5] * rhs.data[2]; accum.data[3] += data[5] * rhs.data[3]; accum.data[4] += data[9] * rhs.data[2]; accum.data[5] += data[9] * rhs.data[3]; // k=2 accum.data[0] += data[2] * rhs.data[4]; accum.data[1] += data[2] * rhs.data[5]; accum.data[2] += data[6] * rhs.data[4]; accum.data[3] += data[6] * rhs.data[5]; accum.data[4] += data[10] * rhs.data[4]; accum.data[5] += data[10] * rhs.data[5]; // k=3 accum.data[0] += data[3] * rhs.data[6]; accum.data[1] += data[3] * rhs.data[7]; accum.data[2] += data[7] * rhs.data[6]; accum.data[3] += data[7] * rhs.data[7]; accum.data[4] += data[11] * rhs.data[6]; accum.data[5] += data[11] * rhs.data[7]; return accum; } /// Matrix product of size 3-by-2-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 3, 2> operator*(Matrix<Element, 4, 2> const &rhs) const { return product(rhs); } /// Matrix product of size 3-by-3-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 3, 3> product( Matrix<Element, 4, 3> const &rhs, Matrix<Element, 3, 3> accum = Matrix<Element, 3, 3>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[4] * rhs.data[0]; accum.data[4] += data[4] * rhs.data[1]; accum.data[5] += data[4] * rhs.data[2]; accum.data[6] += data[8] * rhs.data[0]; accum.data[7] += data[8] * rhs.data[1]; accum.data[8] += data[8] * rhs.data[2]; // k=1 accum.data[0] += data[1] * rhs.data[3]; accum.data[1] += data[1] * rhs.data[4]; accum.data[2] += data[1] * rhs.data[5]; accum.data[3] += data[5] * rhs.data[3]; accum.data[4] += data[5] * rhs.data[4]; accum.data[5] += data[5] * rhs.data[5]; accum.data[6] += data[9] * rhs.data[3]; accum.data[7] += data[9] * rhs.data[4]; accum.data[8] += data[9] * rhs.data[5]; // k=2 accum.data[0] += data[2] * rhs.data[6]; accum.data[1] += data[2] * rhs.data[7]; accum.data[2] += data[2] * rhs.data[8]; accum.data[3] += data[6] * rhs.data[6]; accum.data[4] += data[6] * rhs.data[7]; accum.data[5] += data[6] * rhs.data[8]; accum.data[6] += data[10] * rhs.data[6]; accum.data[7] += data[10] * rhs.data[7]; accum.data[8] += data[10] * rhs.data[8]; // k=3 accum.data[0] += data[3] * rhs.data[9]; accum.data[1] += data[3] * rhs.data[10]; accum.data[2] += data[3] * rhs.data[11]; accum.data[3] += data[7] * rhs.data[9]; accum.data[4] += data[7] * rhs.data[10]; 
accum.data[5] += data[7] * rhs.data[11]; accum.data[6] += data[11] * rhs.data[9]; accum.data[7] += data[11] * rhs.data[10]; accum.data[8] += data[11] * rhs.data[11]; return accum; } /// Matrix product of size 3-by-3-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 3, 3> operator*(Matrix<Element, 4, 3> const &rhs) const { return product(rhs); } /// Matrix product of size 3-by-4-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 3, 4> product( Matrix<Element, 4, 4> const &rhs, Matrix<Element, 3, 4> accum = Matrix<Element, 3, 4>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[0] * rhs.data[3]; accum.data[4] += data[4] * rhs.data[0]; accum.data[5] += data[4] * rhs.data[1]; accum.data[6] += data[4] * rhs.data[2]; accum.data[7] += data[4] * rhs.data[3]; accum.data[8] += data[8] * rhs.data[0]; accum.data[9] += data[8] * rhs.data[1]; accum.data[10] += data[8] * rhs.data[2]; accum.data[11] += data[8] * rhs.data[3]; // k=1 accum.data[0] += data[1] * rhs.data[4]; accum.data[1] += data[1] * rhs.data[5]; accum.data[2] += data[1] * rhs.data[6]; accum.data[3] += data[1] * rhs.data[7]; accum.data[4] += data[5] * rhs.data[4]; accum.data[5] += data[5] * rhs.data[5]; accum.data[6] += data[5] * rhs.data[6]; accum.data[7] += data[5] * rhs.data[7]; accum.data[8] += data[9] * rhs.data[4]; accum.data[9] += data[9] * rhs.data[5]; accum.data[10] += data[9] * rhs.data[6]; accum.data[11] += data[9] * rhs.data[7]; // k=2 accum.data[0] += data[2] * rhs.data[8]; accum.data[1] += data[2] * rhs.data[9]; accum.data[2] += data[2] * rhs.data[10]; accum.data[3] += data[2] * rhs.data[11]; accum.data[4] += data[6] * rhs.data[8]; accum.data[5] += data[6] * rhs.data[9]; accum.data[6] += data[6] * rhs.data[10]; accum.data[7] += data[6] * rhs.data[11]; accum.data[8] += data[10] * rhs.data[8]; accum.data[9] += data[10] * rhs.data[9]; accum.data[10] += data[10] * rhs.data[10]; accum.data[11] += data[10] * rhs.data[11]; // k=3 accum.data[0] += data[3] * rhs.data[12]; accum.data[1] += data[3] * rhs.data[13]; accum.data[2] += data[3] * rhs.data[14]; accum.data[3] += data[3] * rhs.data[15]; accum.data[4] += data[7] * rhs.data[12]; accum.data[5] += data[7] * rhs.data[13]; accum.data[6] += data[7] * rhs.data[14]; accum.data[7] += data[7] * rhs.data[15]; accum.data[8] += data[11] * rhs.data[12]; accum.data[9] += data[11] * rhs.data[13]; accum.data[10] += data[11] * rhs.data[14]; accum.data[11] += data[11] * rhs.data[15]; return accum; } /// Matrix product of size 3-by-4-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 3, 4> operator*(Matrix<Element, 4, 4> const &rhs) const { return product(rhs); } /// Matrix product of size 3-by-4-by-4 CUTLASS_HOST_DEVICE Matrix & operator*=(Matrix<Element, 4, 4> const &rhs) { *this = product(rhs); return *this; } /// Returns the sum of elements CUTLASS_HOST_DEVICE Element sum(Element accum = Element()) const { accum += data[0]; accum += data[1]; accum += data[2]; accum += data[3]; accum += data[4]; accum += data[5]; accum += data[6]; accum += data[7]; accum += data[8]; accum += data[9]; accum += data[10]; accum += data[11]; return accum; } /// Returns the sum of squared elements CUTLASS_HOST_DEVICE Element norm(Element accum = Element()) const { accum += data[0] * data[0]; accum += data[1] * data[1]; accum += data[2] * data[2]; accum += data[3] * data[3]; accum += data[4] * data[4]; accum += data[5] * data[5]; accum += data[6] * data[6]; accum += data[7] * data[7]; accum += data[8] * data[8]; accum += data[9] * data[9]; 
accum += data[10] * data[10]; accum += data[11] * data[11]; return accum; } /// Returns square root of the norm CUTLASS_HOST_DEVICE Element magnitude() const { return fast_sqrt(norm()); } /// Returns the sum of diagonal elements CUTLASS_HOST_DEVICE Element trace(Element accum = Element()) const { accum += data[0]; accum += data[5]; accum += data[10]; return accum; } }; /// Template alias for 3-by-4 matrix template <typename Element> using Matrix3x4 = Matrix<Element, 3, 4>; /// Free function to infer element type from template arguments template <typename Element> CUTLASS_HOST_DEVICE Matrix3x4<Element> make_Matrix3x4( Element _0_0, Element _0_1, Element _0_2, Element _0_3, Element _1_0, Element _1_1, Element _1_2, Element _1_3, Element _2_0, Element _2_1, Element _2_2, Element _2_3 ) { return Matrix3x4<Element>( _0_0, _0_1, _0_2, _0_3, _1_0, _1_1, _1_2, _1_3, _2_0, _2_1, _2_2, _2_3 ); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// 4-by-1 matrix template class definition template <typename Element_> struct Matrix<Element_, 4, 1> { // // Type definitions // /// Element data type using Element = Element_; /// Number of rows in matrix static int const kRows = 4; /// Number of columns in matrix static int const kColumns = 1; /// Layout of matrix in underlying array using Layout = layout::RowMajor; /// Number of elements in matrix static int const kCount = 4; // // Data members // /// Elements of the matrix in row-major layout Array<Element, kCount> data; // // Methods // /// Constructs a zero matrix CUTLASS_HOST_DEVICE Matrix() { data.clear(); } /// Copy constructor for a 4-by-1 matrix CUTLASS_HOST_DEVICE Matrix(Matrix const &rhs) { data = rhs.data; } /// Constructs a 4-by-1 matrix from scalar elements CUTLASS_HOST_DEVICE Matrix( Element _0_0, Element _1_0, Element _2_0, Element _3_0 ) { data[0] = _0_0; data[1] = _1_0; data[2] = _2_0; data[3] = _3_0; } /// Constructs a matrix from a uniform element CUTLASS_HOST_DEVICE static Matrix uniform(Element s) { Matrix m; m.data[0] = s; m.data[1] = s; m.data[2] = s; m.data[3] = s; return m; } /// Constructs a matrix from a uniform element 1 CUTLASS_HOST_DEVICE static Matrix ones() { return uniform(Element(1)); } /// Constructs a matrix from a uniform element 0 CUTLASS_HOST_DEVICE static Matrix zero() { return Matrix(); } /// Returns a transposed matrix CUTLASS_HOST_DEVICE Matrix<Element, 1, 4> transpose() const { Matrix<Element, 1, 4> mt; mt.data[0] = data[0]; mt.data[1] = data[1]; mt.data[2] = data[2]; mt.data[3] = data[3]; return mt; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(int i, int j) const { return data[i * 1 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(int i, int j) { return data[i * 1 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element &at(int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element at(int offset) const { return data[offset]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element operator[](Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & operator[](Coord<2> const &coord) { return at(coord[0], coord[1]); } 
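// Illustrative usage sketch (an editorial example, not part of the original header): a
// Matrix<Element, 4, 1> acts as a column vector, so with the constructor above and the
// reductions defined later in this class one might write:
//
//   Matrix4x1<float> v(1.0f, 2.0f, 3.0f, 4.0f);
//   float sq  = v.dot(v);       // 30.0f, the sum of squared elements
//   float len = v.magnitude();  // fast_sqrt(v.norm()), i.e. sqrt(30.0f)
//
// The Matrix4x1 alias used here is declared immediately after this class definition.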
/// Accesses an element by offset CUTLASS_HOST_DEVICE Element & operator[](int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element operator[](int offset) const { return data[offset]; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const { Matrix<Element, 2, 1> m; m.data[0] = data[i * 1 + j + 0]; m.data[1] = data[i * 1 + j + 1]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) { data[i * 1 + j + 0] = m.data[0]; data[i * 1 + j + 1] = m.data[1]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const { Matrix<Element, 3, 1> m; m.data[0] = data[i * 1 + j + 0]; m.data[1] = data[i * 1 + j + 1]; m.data[2] = data[i * 1 + j + 2]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) { data[i * 1 + j + 0] = m.data[0]; data[i * 1 + j + 1] = m.data[1]; data[i * 1 + j + 2] = m.data[2]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 4, 1> slice_4x1(int i = 0, int j = 0) const { Matrix<Element, 4, 1> m; m.data[0] = data[i * 1 + j + 0]; m.data[1] = data[i * 1 + j + 1]; m.data[2] = data[i * 1 + j + 2]; m.data[3] = data[i * 1 + j + 3]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_4x1(Matrix<Element, 4, 1> const &m, int i = 0, int j = 0) { data[i * 1 + j + 0] = m.data[0]; data[i * 1 + j + 1] = m.data[1]; data[i * 1 + j + 2] = m.data[2]; data[i * 1 + j + 3] = m.data[3]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 4, 1> column(int j) const { return slice_4x1(0, j); } CUTLASS_HOST_DEVICE Matrix &set_column(Matrix<Element, 4, 1> const &v, int j = 0) { return set_slice_4x1(v, 0, j); } /// Concatenates this matrix with a 4-by-1 matrix to form a 4-by-2 matrix CUTLASS_HOST_DEVICE Matrix<Element, 4, 2> hcat(Matrix<Element, 4, 1> const & rhs) const { return Matrix<Element, 4, 2>::hcat(*this, rhs); } /// Concatenates this matrix with a 4-by-2 matrix to form a 4-by-3 matrix CUTLASS_HOST_DEVICE Matrix<Element, 4, 3> hcat(Matrix<Element, 4, 2> const & rhs) const { return Matrix<Element, 4, 3>::hcat(*this, rhs); } /// Concatenates this matrix with a 4-by-3 matrix to form a 4-by-4 matrix CUTLASS_HOST_DEVICE Matrix<Element, 4, 4> hcat(Matrix<Element, 4, 3> const & rhs) const { return Matrix<Element, 4, 4>::hcat(*this, rhs); } /// Forms a 4-by-1 matrix by vertically concatenating an Element with a 3-by-1 matrix CUTLASS_HOST_DEVICE static Matrix vcat(Element upper, Matrix<Element, 3, 1> const & lower) { return Matrix( upper , lower.at(0, 0) , lower.at(1, 0) , lower.at(2, 0)); } /// Forms a 4-by-1 matrix by vertically concatenating a 2-by-1 matrix with a 2-by-1 matrix CUTLASS_HOST_DEVICE static Matrix vcat(Matrix<Element, 2, 1> const & upper, Matrix<Element, 2, 1> const & lower) { return Matrix( upper.at(0, 0) , upper.at(1, 0) , lower.at(0, 0) , lower.at(1, 0)); } /// Forms a 4-by-1 matrix by vertically concatenating a 3-by-1 matrix with an Element CUTLASS_HOST_DEVICE static Matrix vcat(Matrix<Element, 3, 1> const & upper, Element lower) { return Matrix( upper.at(0, 0) , upper.at(1, 0) , upper.at(2, 0) , lower); } /// Elementwise add operator (4-by-1) CUTLASS_HOST_DEVICE Matrix add(Matrix const &rhs) const { Matrix 
result; result.data[0] = data[0] + rhs.data[0]; result.data[1] = data[1] + rhs.data[1]; result.data[2] = data[2] + rhs.data[2]; result.data[3] = data[3] + rhs.data[3]; return result; } /// Elementwise add operator (4-by-1) CUTLASS_HOST_DEVICE Matrix operator +(Matrix const &rhs) const { return add(rhs); } /// Elementwise add operator (4-by-1) CUTLASS_HOST_DEVICE Matrix & operator +=(Matrix const &rhs) { data[0] += rhs.data[0]; data[1] += rhs.data[1]; data[2] += rhs.data[2]; data[3] += rhs.data[3]; return *this; } /// Elementwise subtract operator (4-by-1) CUTLASS_HOST_DEVICE Matrix subtract(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] - rhs.data[0]; result.data[1] = data[1] - rhs.data[1]; result.data[2] = data[2] - rhs.data[2]; result.data[3] = data[3] - rhs.data[3]; return result; } /// Elementwise subtract operator (4-by-1) CUTLASS_HOST_DEVICE Matrix operator -(Matrix const &rhs) const { return subtract(rhs); } /// Elementwise subtract operator (4-by-1) CUTLASS_HOST_DEVICE Matrix & operator -=(Matrix const &rhs) { data[0] -= rhs.data[0]; data[1] -= rhs.data[1]; data[2] -= rhs.data[2]; data[3] -= rhs.data[3]; return *this; } /// Elementwise multiply operator (4-by-1) CUTLASS_HOST_DEVICE Matrix multiply(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] * rhs.data[0]; result.data[1] = data[1] * rhs.data[1]; result.data[2] = data[2] * rhs.data[2]; result.data[3] = data[3] * rhs.data[3]; return result; } /// Scalar multiply operator (4-by-1) CUTLASS_HOST_DEVICE Matrix multiply(Element const &s) const { Matrix result; result.data[0] = data[0] * s; result.data[1] = data[1] * s; result.data[2] = data[2] * s; result.data[3] = data[3] * s; return result; } /// Scalar multiply operator (4-by-1) CUTLASS_HOST_DEVICE Matrix operator *(Element const &s) const { return multiply(s); } /// Scalar multiply operator (4-by-1) CUTLASS_HOST_DEVICE Matrix & operator *=(Element const &s) { data[0] *= s; data[1] *= s; data[2] *= s; data[3] *= s; return *this; } /// Elementwise divide operator (4-by-1) CUTLASS_HOST_DEVICE Matrix divide(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] / rhs.data[0]; result.data[1] = data[1] / rhs.data[1]; result.data[2] = data[2] / rhs.data[2]; result.data[3] = data[3] / rhs.data[3]; return result; } /// Scalar divide operator (4-by-1) CUTLASS_HOST_DEVICE Matrix divide(Element const &s) const { Matrix result; result.data[0] = data[0] / s; result.data[1] = data[1] / s; result.data[2] = data[2] / s; result.data[3] = data[3] / s; return result; } /// Scalar divide operator (4-by-1) CUTLASS_HOST_DEVICE Matrix operator /(Element const &s) const { return divide(s); } /// Scalar divide operator (4-by-1) CUTLASS_HOST_DEVICE Matrix & operator /=(Element const &s) { data[0] /= s; data[1] /= s; data[2] /= s; data[3] /= s; return *this; } /// Elementwise divide operator (4-by-1) CUTLASS_HOST_DEVICE Matrix operator /(Matrix const &rhs) const { return divide(rhs); } /// Elementwise divide operator (4-by-1) CUTLASS_HOST_DEVICE Matrix & operator /=(Matrix const &rhs) { data[0] /= rhs.data[0]; data[1] /= rhs.data[1]; data[2] /= rhs.data[2]; data[3] /= rhs.data[3]; return *this; } /// Negates each element of the matrix CUTLASS_HOST_DEVICE Matrix operator-() const { Matrix m; m.data[0] = -data[0]; m.data[1] = -data[1]; m.data[2] = -data[2]; m.data[3] = -data[3]; return m; } /// Matrix product of size 4-by-1-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 4, 1> product( Matrix<Element, 1, 1> const &rhs, Matrix<Element, 4, 1> accum = 
Matrix<Element, 4, 1>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[1] * rhs.data[0]; accum.data[2] += data[2] * rhs.data[0]; accum.data[3] += data[3] * rhs.data[0]; return accum; } /// Matrix product of size 4-by-1-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 4, 1> operator*(Matrix<Element, 1, 1> const &rhs) const { return product(rhs); } /// Matrix product of size 4-by-1-by-1 CUTLASS_HOST_DEVICE Matrix & operator*=(Matrix<Element, 1, 1> const &rhs) { *this = product(rhs); return *this; } /// Matrix product of size 4-by-2-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 4, 2> product( Matrix<Element, 1, 2> const &rhs, Matrix<Element, 4, 2> accum = Matrix<Element, 4, 2>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[1] * rhs.data[0]; accum.data[3] += data[1] * rhs.data[1]; accum.data[4] += data[2] * rhs.data[0]; accum.data[5] += data[2] * rhs.data[1]; accum.data[6] += data[3] * rhs.data[0]; accum.data[7] += data[3] * rhs.data[1]; return accum; } /// Matrix product of size 4-by-2-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 4, 2> operator*(Matrix<Element, 1, 2> const &rhs) const { return product(rhs); } /// Matrix product of size 4-by-3-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 4, 3> product( Matrix<Element, 1, 3> const &rhs, Matrix<Element, 4, 3> accum = Matrix<Element, 4, 3>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[1] * rhs.data[0]; accum.data[4] += data[1] * rhs.data[1]; accum.data[5] += data[1] * rhs.data[2]; accum.data[6] += data[2] * rhs.data[0]; accum.data[7] += data[2] * rhs.data[1]; accum.data[8] += data[2] * rhs.data[2]; accum.data[9] += data[3] * rhs.data[0]; accum.data[10] += data[3] * rhs.data[1]; accum.data[11] += data[3] * rhs.data[2]; return accum; } /// Matrix product of size 4-by-3-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 4, 3> operator*(Matrix<Element, 1, 3> const &rhs) const { return product(rhs); } /// Matrix product of size 4-by-4-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 4, 4> product( Matrix<Element, 1, 4> const &rhs, Matrix<Element, 4, 4> accum = Matrix<Element, 4, 4>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[0] * rhs.data[3]; accum.data[4] += data[1] * rhs.data[0]; accum.data[5] += data[1] * rhs.data[1]; accum.data[6] += data[1] * rhs.data[2]; accum.data[7] += data[1] * rhs.data[3]; accum.data[8] += data[2] * rhs.data[0]; accum.data[9] += data[2] * rhs.data[1]; accum.data[10] += data[2] * rhs.data[2]; accum.data[11] += data[2] * rhs.data[3]; accum.data[12] += data[3] * rhs.data[0]; accum.data[13] += data[3] * rhs.data[1]; accum.data[14] += data[3] * rhs.data[2]; accum.data[15] += data[3] * rhs.data[3]; return accum; } /// Matrix product of size 4-by-4-by-1 CUTLASS_HOST_DEVICE Matrix<Element, 4, 4> operator*(Matrix<Element, 1, 4> const &rhs) const { return product(rhs); } /// Dot product of vectors with extent 4 CUTLASS_HOST_DEVICE Element dot(Matrix<Element, 4, 1> const &rhs, Element accum = Element()) const { accum += data[0] * rhs.data[0]; accum += data[1] * rhs.data[1]; accum += data[2] * rhs.data[2]; accum += data[3] * rhs.data[3]; return accum; } /// Dot product of vectors with extent 4 CUTLASS_HOST_DEVICE Element dot(Matrix<Element, 1, 4> const &rhs, Element accum = Element()) const { accum += data[0] * rhs.data[0]; accum += data[1] * 
rhs.data[1]; accum += data[2] * rhs.data[2]; accum += data[3] * rhs.data[3]; return accum; } /// Returns the sum of elements CUTLASS_HOST_DEVICE Element sum(Element accum = Element()) const { accum += data[0]; accum += data[1]; accum += data[2]; accum += data[3]; return accum; } /// Returns the sum of squared elements CUTLASS_HOST_DEVICE Element norm(Element accum = Element()) const { accum += data[0] * data[0]; accum += data[1] * data[1]; accum += data[2] * data[2]; accum += data[3] * data[3]; return accum; } /// Returns square root of the norm CUTLASS_HOST_DEVICE Element magnitude() const { return fast_sqrt(norm()); } /// Returns the sum of diagonal elements CUTLASS_HOST_DEVICE Element trace(Element accum = Element()) const { accum += data[0]; return accum; } }; /// Template alias for 4-by-1 matrix template <typename Element> using Matrix4x1 = Matrix<Element, 4, 1>; /// Free function to infer element type from template arguments template <typename Element> CUTLASS_HOST_DEVICE Matrix4x1<Element> make_Matrix4x1( Element _0_0, Element _1_0, Element _2_0, Element _3_0 ) { return Matrix4x1<Element>( _0_0, _1_0, _2_0, _3_0 ); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// 4-by-2 matrix template class definition template <typename Element_> struct Matrix<Element_, 4, 2> { // // Type definitions // /// Element data type using Element = Element_; /// Number of rows in matrix static int const kRows = 4; /// Number of columns in matrix static int const kColumns = 2; /// Layout of matrix in underlying array using Layout = layout::RowMajor; /// Number of elements in matrix static int const kCount = 8; // // Data members // /// Elements of the matrix in row-major layout Array<Element, kCount> data; // // Methods // /// Constructs a zero matrix CUTLASS_HOST_DEVICE Matrix() { data.clear(); } /// Copy constructor for a 4-by-2 matrix CUTLASS_HOST_DEVICE Matrix(Matrix const &rhs) { data = rhs.data; } /// Constructs a 4-by-2 matrix from scalar elements CUTLASS_HOST_DEVICE Matrix( Element _0_0, Element _0_1, Element _1_0, Element _1_1, Element _2_0, Element _2_1, Element _3_0, Element _3_1 ) { data[0] = _0_0; data[1] = _0_1; data[2] = _1_0; data[3] = _1_1; data[4] = _2_0; data[5] = _2_1; data[6] = _3_0; data[7] = _3_1; } /// Constructs a 4-by-2 matrix from row vectors CUTLASS_HOST_DEVICE Matrix( Matrix<Element, 1, 2> const &row_0, Matrix<Element, 1, 2> const &row_1, Matrix<Element, 1, 2> const &row_2, Matrix<Element, 1, 2> const &row_3 ) { data[0] = row_0.data[0]; data[1] = row_0.data[1]; data[2] = row_1.data[0]; data[3] = row_1.data[1]; data[4] = row_2.data[0]; data[5] = row_2.data[1]; data[6] = row_3.data[0]; data[7] = row_3.data[1]; } /// Static method to construct a 4-by-2 matrix from column vectors CUTLASS_HOST_DEVICE static Matrix from_columns( Matrix<Element, 4, 1> const &column_0, Matrix<Element, 4, 1> const &column_1 ) { Matrix result; result.data[0] = column_0.data[0]; result.data[1] = column_1.data[0]; result.data[2] = column_0.data[1]; result.data[3] = column_1.data[1]; result.data[4] = column_0.data[2]; result.data[5] = column_1.data[2]; result.data[6] = column_0.data[3]; result.data[7] = column_1.data[3]; return result; } /// Constructs a matrix from a uniform element CUTLASS_HOST_DEVICE static Matrix uniform(Element s) { Matrix m; m.data[0] = s; m.data[1] = s; m.data[2] = s; m.data[3] = s; m.data[4] = s; m.data[5] = s; m.data[6] = s; m.data[7] = s; return m; } /// Constructs a matrix from a uniform element 1 CUTLASS_HOST_DEVICE static 
Matrix ones() { return uniform(Element(1)); } /// Constructs a matrix from a uniform element 0 CUTLASS_HOST_DEVICE static Matrix zero() { return Matrix(); } /// Constructs a matrix from elements along its diagonal CUTLASS_HOST_DEVICE static Matrix from_diagonal(Matrix<Element, 2, 1> const &diag) { Matrix m; m.data[0] = diag.data[0]; m.data[3] = diag.data[1]; return m; } /// Constructs a matrix from elements along its diagonal CUTLASS_HOST_DEVICE static Matrix from_diagonal(Matrix<Element, 1, 2> const &diag) { Matrix m; m.data[0] = diag.data[0]; m.data[3] = diag.data[1]; return m; } /// Gets an array of diagonal elements CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> diagonal() const { Matrix<Element, 2, 1> diag; diag.data[0] = data[0]; diag.data[1] = data[3]; return diag; } /// Returns a transposed matrix CUTLASS_HOST_DEVICE Matrix<Element, 2, 4> transpose() const { Matrix<Element, 2, 4> mt; mt.data[0] = data[0]; mt.data[4] = data[1]; mt.data[1] = data[2]; mt.data[5] = data[3]; mt.data[2] = data[4]; mt.data[6] = data[5]; mt.data[3] = data[6]; mt.data[7] = data[7]; return mt; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(int i, int j) const { return data[i * 2 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(int i, int j) { return data[i * 2 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element &at(int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element at(int offset) const { return data[offset]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element operator[](Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & operator[](Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element & operator[](int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element operator[](int offset) const { return data[offset]; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const { Matrix<Element, 1, 2> m; m.data[0] = data[i * 2 + j + 0]; m.data[1] = data[i * 2 + j + 1]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) { data[i * 2 + j + 0] = m.data[0]; data[i * 2 + j + 1] = m.data[1]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 1, 2> row(int i) const { return slice_1x2(i, 0); } CUTLASS_HOST_DEVICE Matrix &set_row(Matrix<Element, 1, 2> const &v, int i = 0) { return set_slice_1x2(v, i, 0); } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const { Matrix<Element, 2, 1> m; m.data[0] = data[i * 2 + j + 0]; m.data[1] = data[i * 2 + j + 2]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) { data[i * 2 + j + 0] = m.data[0]; data[i * 2 + j + 2] = m.data[1]; 
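// Consecutive rows of this 4-by-2 matrix are kColumns = 2 elements apart, which is why the
// column-oriented slices above and below step through data[] with a stride of 2.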
return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const { Matrix<Element, 2, 2> m; m.data[0] = data[i * 2 + j + 0]; m.data[1] = data[i * 2 + j + 1]; m.data[2] = data[i * 2 + j + 2]; m.data[3] = data[i * 2 + j + 3]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) { data[i * 2 + j + 0] = m.data[0]; data[i * 2 + j + 1] = m.data[1]; data[i * 2 + j + 2] = m.data[2]; data[i * 2 + j + 3] = m.data[3]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const { Matrix<Element, 3, 1> m; m.data[0] = data[i * 2 + j + 0]; m.data[1] = data[i * 2 + j + 2]; m.data[2] = data[i * 2 + j + 4]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) { data[i * 2 + j + 0] = m.data[0]; data[i * 2 + j + 2] = m.data[1]; data[i * 2 + j + 4] = m.data[2]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 3, 2> slice_3x2(int i = 0, int j = 0) const { Matrix<Element, 3, 2> m; m.data[0] = data[i * 2 + j + 0]; m.data[1] = data[i * 2 + j + 1]; m.data[2] = data[i * 2 + j + 2]; m.data[3] = data[i * 2 + j + 3]; m.data[4] = data[i * 2 + j + 4]; m.data[5] = data[i * 2 + j + 5]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_3x2(Matrix<Element, 3, 2> const &m, int i = 0, int j = 0) { data[i * 2 + j + 0] = m.data[0]; data[i * 2 + j + 1] = m.data[1]; data[i * 2 + j + 2] = m.data[2]; data[i * 2 + j + 3] = m.data[3]; data[i * 2 + j + 4] = m.data[4]; data[i * 2 + j + 5] = m.data[5]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 4, 1> slice_4x1(int i = 0, int j = 0) const { Matrix<Element, 4, 1> m; m.data[0] = data[i * 2 + j + 0]; m.data[1] = data[i * 2 + j + 2]; m.data[2] = data[i * 2 + j + 4]; m.data[3] = data[i * 2 + j + 6]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_4x1(Matrix<Element, 4, 1> const &m, int i = 0, int j = 0) { data[i * 2 + j + 0] = m.data[0]; data[i * 2 + j + 2] = m.data[1]; data[i * 2 + j + 4] = m.data[2]; data[i * 2 + j + 6] = m.data[3]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 4, 1> column(int j) const { return slice_4x1(0, j); } CUTLASS_HOST_DEVICE Matrix &set_column(Matrix<Element, 4, 1> const &v, int j =0) { return set_slice_4x1(v, 0, j); } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 4, 2> slice_4x2(int i = 0, int j = 0) const { Matrix<Element, 4, 2> m; m.data[0] = data[i * 2 + j + 0]; m.data[1] = data[i * 2 + j + 1]; m.data[2] = data[i * 2 + j + 2]; m.data[3] = data[i * 2 + j + 3]; m.data[4] = data[i * 2 + j + 4]; m.data[5] = data[i * 2 + j + 5]; m.data[6] = data[i * 2 + j + 6]; m.data[7] = data[i * 2 + j + 7]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_4x2(Matrix<Element, 4, 2> const &m, int i = 0, int j = 0) { data[i * 2 + j + 0] = m.data[0]; data[i * 2 + j + 1] = m.data[1]; data[i * 2 + j + 2] = m.data[2]; data[i * 2 + j + 3] = m.data[3]; data[i * 2 + j + 4] = m.data[4]; data[i * 2 + j + 5] = m.data[5]; data[i * 2 + j + 6] = m.data[6]; data[i * 2 + j + 7] = m.data[7]; return *this; } /// Forms a 4-by-2 matrix by horizontally concatenating a 
4-by-1 matrix with a 4-by-1 matrix CUTLASS_HOST_DEVICE static Matrix hcat(Matrix<Element, 4, 1> const & lhs, Matrix<Element, 4, 1> const & rhs) { return Matrix( lhs.at(0, 0), rhs.at(0, 0) , lhs.at(1, 0), rhs.at(1, 0) , lhs.at(2, 0), rhs.at(2, 0) , lhs.at(3, 0), rhs.at(3, 0)); } /// Concatenates this matrix with a 4-by-1 matrix to form a 4-by-3 matrix CUTLASS_HOST_DEVICE Matrix<Element, 4, 3> hcat(Matrix<Element, 4, 1> const & rhs) const { return Matrix<Element, 4, 3>::hcat(*this, rhs); } /// Concatenates this matrix with a 4-by-2 matrix to form a 4-by-4 matrix CUTLASS_HOST_DEVICE Matrix<Element, 4, 4> hcat(Matrix<Element, 4, 2> const & rhs) const { return Matrix<Element, 4, 4>::hcat(*this, rhs); } /// Forms a 4-by-2 matrix by vertically concatenating a 1-by-2 matrix with a 3-by-2 matrix CUTLASS_HOST_DEVICE static Matrix vcat(Matrix<Element, 1, 2> const & upper, Matrix<Element, 3, 2> const & lower) { return Matrix( upper.at(0, 0), upper.at(0, 1) , lower.at(0, 0), lower.at(0, 1) , lower.at(1, 0), lower.at(1, 1) , lower.at(2, 0), lower.at(2, 1)); } /// Forms a 4-by-2 matrix by vertically concatenating a 2-by-2 matrix with a 2-by-2 matrix CUTLASS_HOST_DEVICE static Matrix vcat(Matrix<Element, 2, 2> const & upper, Matrix<Element, 2, 2> const & lower) { return Matrix( upper.at(0, 0), upper.at(0, 1) , upper.at(1, 0), upper.at(1, 1) , lower.at(0, 0), lower.at(0, 1) , lower.at(1, 0), lower.at(1, 1)); } /// Forms a 4-by-2 matrix by vertically concatenating a 3-by-2 matrix with a 1-by-2 matrix CUTLASS_HOST_DEVICE static Matrix vcat(Matrix<Element, 3, 2> const & upper, Matrix<Element, 1, 2> const & lower) { return Matrix( upper.at(0, 0), upper.at(0, 1) , upper.at(1, 0), upper.at(1, 1) , upper.at(2, 0), upper.at(2, 1) , lower.at(0, 0), lower.at(0, 1)); } /// Forms a 4-by-2 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Element A, Element B, Matrix<Element, 3, 1> const & C, Matrix<Element, 3, 1> const & D) { return Matrix( A, B , C.at(0, 0), D.at(0, 0) , C.at(1, 0), D.at(1, 0) , C.at(2, 0), D.at(2, 0) ); } /// Forms a 4-by-2 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 2, 1> const & A, Matrix<Element, 2, 1> const & B, Matrix<Element, 2, 1> const & C, Matrix<Element, 2, 1> const & D) { return Matrix( A.at(0, 0), B.at(0, 0) , A.at(1, 0), B.at(1, 0) , C.at(0, 0), D.at(0, 0) , C.at(1, 0), D.at(1, 0) ); } /// Forms a 4-by-2 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 3, 1> const & A, Matrix<Element, 3, 1> const & B, Element C, Element D) { return Matrix( A.at(0, 0), B.at(0, 0) , A.at(1, 0), B.at(1, 0) , A.at(2, 0), B.at(2, 0) , C, D ); } /// Elementwise add operator (4-by-2) CUTLASS_HOST_DEVICE Matrix add(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] + rhs.data[0]; result.data[1] = data[1] + rhs.data[1]; result.data[2] = data[2] + rhs.data[2]; result.data[3] = data[3] + rhs.data[3]; result.data[4] = data[4] + rhs.data[4]; result.data[5] = data[5] + rhs.data[5]; result.data[6] = data[6] + rhs.data[6]; result.data[7] = data[7] + rhs.data[7]; return result; } /// Elementwise add operator (4-by-2) CUTLASS_HOST_DEVICE Matrix operator +(Matrix const &rhs) const { return add(rhs); } /// Elementwise add operator (4-by-2) CUTLASS_HOST_DEVICE Matrix & operator +=(Matrix const &rhs) { data[0] += rhs.data[0]; data[1] += rhs.data[1]; data[2] += rhs.data[2]; data[3] += rhs.data[3]; data[4] += rhs.data[4]; data[5] += rhs.data[5]; data[6] += rhs.data[6]; data[7] 
+= rhs.data[7]; return *this; } /// Elementwise subtract operator (4-by-2) CUTLASS_HOST_DEVICE Matrix subtract(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] - rhs.data[0]; result.data[1] = data[1] - rhs.data[1]; result.data[2] = data[2] - rhs.data[2]; result.data[3] = data[3] - rhs.data[3]; result.data[4] = data[4] - rhs.data[4]; result.data[5] = data[5] - rhs.data[5]; result.data[6] = data[6] - rhs.data[6]; result.data[7] = data[7] - rhs.data[7]; return result; } /// Elementwise subtract operator (4-by-2) CUTLASS_HOST_DEVICE Matrix operator -(Matrix const &rhs) const { return subtract(rhs); } /// Elementwise subtract operator (4-by-2) CUTLASS_HOST_DEVICE Matrix & operator -=(Matrix const &rhs) { data[0] -= rhs.data[0]; data[1] -= rhs.data[1]; data[2] -= rhs.data[2]; data[3] -= rhs.data[3]; data[4] -= rhs.data[4]; data[5] -= rhs.data[5]; data[6] -= rhs.data[6]; data[7] -= rhs.data[7]; return *this; } /// Elementwise multiply operator (4-by-2) CUTLASS_HOST_DEVICE Matrix multiply(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] * rhs.data[0]; result.data[1] = data[1] * rhs.data[1]; result.data[2] = data[2] * rhs.data[2]; result.data[3] = data[3] * rhs.data[3]; result.data[4] = data[4] * rhs.data[4]; result.data[5] = data[5] * rhs.data[5]; result.data[6] = data[6] * rhs.data[6]; result.data[7] = data[7] * rhs.data[7]; return result; } /// Scalar multiply operator (4-by-2) CUTLASS_HOST_DEVICE Matrix multiply(Element const &s) const { Matrix result; result.data[0] = data[0] * s; result.data[1] = data[1] * s; result.data[2] = data[2] * s; result.data[3] = data[3] * s; result.data[4] = data[4] * s; result.data[5] = data[5] * s; result.data[6] = data[6] * s; result.data[7] = data[7] * s; return result; } /// Scalar multiply operator (4-by-2) CUTLASS_HOST_DEVICE Matrix operator *(Element const &s) const { return multiply(s); } /// Scalar multiply operator (4-by-2) CUTLASS_HOST_DEVICE Matrix & operator *=(Element const &s) { data[0] *= s; data[1] *= s; data[2] *= s; data[3] *= s; data[4] *= s; data[5] *= s; data[6] *= s; data[7] *= s; return *this; } /// Elementwise divide operator (4-by-2) CUTLASS_HOST_DEVICE Matrix divide(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] / rhs.data[0]; result.data[1] = data[1] / rhs.data[1]; result.data[2] = data[2] / rhs.data[2]; result.data[3] = data[3] / rhs.data[3]; result.data[4] = data[4] / rhs.data[4]; result.data[5] = data[5] / rhs.data[5]; result.data[6] = data[6] / rhs.data[6]; result.data[7] = data[7] / rhs.data[7]; return result; } /// Scalar divide operator (4-by-2) CUTLASS_HOST_DEVICE Matrix divide(Element const &s) const { Matrix result; result.data[0] = data[0] / s; result.data[1] = data[1] / s; result.data[2] = data[2] / s; result.data[3] = data[3] / s; result.data[4] = data[4] / s; result.data[5] = data[5] / s; result.data[6] = data[6] / s; result.data[7] = data[7] / s; return result; } /// Scalar divide operator (4-by-2) CUTLASS_HOST_DEVICE Matrix operator /(Element const &s) const { return divide(s); } /// Scalar divide operator (4-by-2) CUTLASS_HOST_DEVICE Matrix & operator /=(Element const &s) { data[0] /= s; data[1] /= s; data[2] /= s; data[3] /= s; data[4] /= s; data[5] /= s; data[6] /= s; data[7] /= s; return *this; } /// Elementwise divide operator (4-by-2) CUTLASS_HOST_DEVICE Matrix operator /(Matrix const &rhs) const { return divide(rhs); } /// Elementwise divide operator (4-by-2) CUTLASS_HOST_DEVICE Matrix & operator /=(Matrix const &rhs) { data[0] /= rhs.data[0]; data[1] /= 
rhs.data[1]; data[2] /= rhs.data[2]; data[3] /= rhs.data[3]; data[4] /= rhs.data[4]; data[5] /= rhs.data[5]; data[6] /= rhs.data[6]; data[7] /= rhs.data[7]; return *this; } /// Negates each element of the matrix CUTLASS_HOST_DEVICE Matrix operator-() const { Matrix m; m.data[0] = -data[0]; m.data[1] = -data[1]; m.data[2] = -data[2]; m.data[3] = -data[3]; m.data[4] = -data[4]; m.data[5] = -data[5]; m.data[6] = -data[6]; m.data[7] = -data[7]; return m; } /// Matrix product of size 4-by-1-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 4, 1> product( Matrix<Element, 2, 1> const &rhs, Matrix<Element, 4, 1> accum = Matrix<Element, 4, 1>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[2] * rhs.data[0]; accum.data[2] += data[4] * rhs.data[0]; accum.data[3] += data[6] * rhs.data[0]; // k=1 accum.data[0] += data[1] * rhs.data[1]; accum.data[1] += data[3] * rhs.data[1]; accum.data[2] += data[5] * rhs.data[1]; accum.data[3] += data[7] * rhs.data[1]; return accum; } /// Matrix product of size 4-by-1-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 4, 1> operator*(Matrix<Element, 2, 1> const &rhs) const { return product(rhs); } /// Matrix product of size 4-by-2-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 4, 2> product( Matrix<Element, 2, 2> const &rhs, Matrix<Element, 4, 2> accum = Matrix<Element, 4, 2>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[2] * rhs.data[0]; accum.data[3] += data[2] * rhs.data[1]; accum.data[4] += data[4] * rhs.data[0]; accum.data[5] += data[4] * rhs.data[1]; accum.data[6] += data[6] * rhs.data[0]; accum.data[7] += data[6] * rhs.data[1]; // k=1 accum.data[0] += data[1] * rhs.data[2]; accum.data[1] += data[1] * rhs.data[3]; accum.data[2] += data[3] * rhs.data[2]; accum.data[3] += data[3] * rhs.data[3]; accum.data[4] += data[5] * rhs.data[2]; accum.data[5] += data[5] * rhs.data[3]; accum.data[6] += data[7] * rhs.data[2]; accum.data[7] += data[7] * rhs.data[3]; return accum; } /// Matrix product of size 4-by-2-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 4, 2> operator*(Matrix<Element, 2, 2> const &rhs) const { return product(rhs); } /// Matrix product of size 4-by-2-by-2 CUTLASS_HOST_DEVICE Matrix & operator*=(Matrix<Element, 2, 2> const &rhs) { *this = product(rhs); return *this; } /// Matrix product of size 4-by-3-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 4, 3> product( Matrix<Element, 2, 3> const &rhs, Matrix<Element, 4, 3> accum = Matrix<Element, 4, 3>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[2] * rhs.data[0]; accum.data[4] += data[2] * rhs.data[1]; accum.data[5] += data[2] * rhs.data[2]; accum.data[6] += data[4] * rhs.data[0]; accum.data[7] += data[4] * rhs.data[1]; accum.data[8] += data[4] * rhs.data[2]; accum.data[9] += data[6] * rhs.data[0]; accum.data[10] += data[6] * rhs.data[1]; accum.data[11] += data[6] * rhs.data[2]; // k=1 accum.data[0] += data[1] * rhs.data[3]; accum.data[1] += data[1] * rhs.data[4]; accum.data[2] += data[1] * rhs.data[5]; accum.data[3] += data[3] * rhs.data[3]; accum.data[4] += data[3] * rhs.data[4]; accum.data[5] += data[3] * rhs.data[5]; accum.data[6] += data[5] * rhs.data[3]; accum.data[7] += data[5] * rhs.data[4]; accum.data[8] += data[5] * rhs.data[5]; accum.data[9] += data[7] * rhs.data[3]; accum.data[10] += data[7] * rhs.data[4]; accum.data[11] += data[7] * rhs.data[5]; return accum; } /// Matrix product of size 4-by-3-by-2
CUTLASS_HOST_DEVICE Matrix<Element, 4, 3> operator*(Matrix<Element, 2, 3> const &rhs) const { return product(rhs); } /// Matrix product of size 4-by-4-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 4, 4> product( Matrix<Element, 2, 4> const &rhs, Matrix<Element, 4, 4> accum = Matrix<Element, 4, 4>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[0] * rhs.data[3]; accum.data[4] += data[2] * rhs.data[0]; accum.data[5] += data[2] * rhs.data[1]; accum.data[6] += data[2] * rhs.data[2]; accum.data[7] += data[2] * rhs.data[3]; accum.data[8] += data[4] * rhs.data[0]; accum.data[9] += data[4] * rhs.data[1]; accum.data[10] += data[4] * rhs.data[2]; accum.data[11] += data[4] * rhs.data[3]; accum.data[12] += data[6] * rhs.data[0]; accum.data[13] += data[6] * rhs.data[1]; accum.data[14] += data[6] * rhs.data[2]; accum.data[15] += data[6] * rhs.data[3]; // k=1 accum.data[0] += data[1] * rhs.data[4]; accum.data[1] += data[1] * rhs.data[5]; accum.data[2] += data[1] * rhs.data[6]; accum.data[3] += data[1] * rhs.data[7]; accum.data[4] += data[3] * rhs.data[4]; accum.data[5] += data[3] * rhs.data[5]; accum.data[6] += data[3] * rhs.data[6]; accum.data[7] += data[3] * rhs.data[7]; accum.data[8] += data[5] * rhs.data[4]; accum.data[9] += data[5] * rhs.data[5]; accum.data[10] += data[5] * rhs.data[6]; accum.data[11] += data[5] * rhs.data[7]; accum.data[12] += data[7] * rhs.data[4]; accum.data[13] += data[7] * rhs.data[5]; accum.data[14] += data[7] * rhs.data[6]; accum.data[15] += data[7] * rhs.data[7]; return accum; } /// Matrix product of size 4-by-4-by-2 CUTLASS_HOST_DEVICE Matrix<Element, 4, 4> operator*(Matrix<Element, 2, 4> const &rhs) const { return product(rhs); } /// Returns the sum of elements CUTLASS_HOST_DEVICE Element sum(Element accum = Element()) const { accum += data[0]; accum += data[1]; accum += data[2]; accum += data[3]; accum += data[4]; accum += data[5]; accum += data[6]; accum += data[7]; return accum; } /// Returns the sum of squared elements CUTLASS_HOST_DEVICE Element norm(Element accum = Element()) const { accum += data[0] * data[0]; accum += data[1] * data[1]; accum += data[2] * data[2]; accum += data[3] * data[3]; accum += data[4] * data[4]; accum += data[5] * data[5]; accum += data[6] * data[6]; accum += data[7] * data[7]; return accum; } /// Returns square root of the norm CUTLASS_HOST_DEVICE Element magnitude() const { return fast_sqrt(norm()); } /// Returns the sum of diagonal elements CUTLASS_HOST_DEVICE Element trace(Element accum = Element()) const { accum += data[0]; accum += data[3]; return accum; } }; /// Template alias for 4-by-2 matrix template <typename Element> using Matrix4x2 = Matrix<Element, 4, 2>; /// Free function to infer element type from template arguments template <typename Element> CUTLASS_HOST_DEVICE Matrix4x2<Element> make_Matrix4x2( Element _0_0, Element _0_1, Element _1_0, Element _1_1, Element _2_0, Element _2_1, Element _3_0, Element _3_1 ) { return Matrix4x2<Element>( _0_0, _0_1, _1_0, _1_1, _2_0, _2_1, _3_0, _3_1 ); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// 4-by-3 matrix template class definition template <typename Element_> struct Matrix<Element_, 4, 3> { // // Type definitions // /// Element data type using Element = Element_; /// Number of rows in matrix static int const kRows = 4; /// Number of columns in matrix static int const kColumns = 3; /// Layout of matrix in underlying array
using Layout = layout::RowMajor; /// Number of elements in matrix static int const kCount = 12; // // Data members // /// Elements of the matrix in row-major layout Array<Element, kCount> data; // // Methods // /// Constructs a zero matrix CUTLASS_HOST_DEVICE Matrix() { data.clear(); } /// Copy constructor for a 4-by-3 matrix CUTLASS_HOST_DEVICE Matrix(Matrix const &rhs) { data = rhs.data; } /// Constructs a 4-by-3 matrix from scalar elements CUTLASS_HOST_DEVICE Matrix( Element _0_0, Element _0_1, Element _0_2, Element _1_0, Element _1_1, Element _1_2, Element _2_0, Element _2_1, Element _2_2, Element _3_0, Element _3_1, Element _3_2 ) { data[0] = _0_0; data[1] = _0_1; data[2] = _0_2; data[3] = _1_0; data[4] = _1_1; data[5] = _1_2; data[6] = _2_0; data[7] = _2_1; data[8] = _2_2; data[9] = _3_0; data[10] = _3_1; data[11] = _3_2; } /// Constructs a 4-by-3 matrix from row vectors CUTLASS_HOST_DEVICE Matrix( Matrix<Element, 1, 3> const &row_0, Matrix<Element, 1, 3> const &row_1, Matrix<Element, 1, 3> const &row_2, Matrix<Element, 1, 3> const &row_3 ) { data[0] = row_0.data[0]; data[1] = row_0.data[1]; data[2] = row_0.data[2]; data[3] = row_1.data[0]; data[4] = row_1.data[1]; data[5] = row_1.data[2]; data[6] = row_2.data[0]; data[7] = row_2.data[1]; data[8] = row_2.data[2]; data[9] = row_3.data[0]; data[10] = row_3.data[1]; data[11] = row_3.data[2]; } /// Static method to construct a 4-by-3 matrix from column vectors CUTLASS_HOST_DEVICE static Matrix from_columns( Matrix<Element, 4, 1> const &column_0, Matrix<Element, 4, 1> const &column_1, Matrix<Element, 4, 1> const &column_2 ) { Matrix result; result.data[0] = column_0.data[0]; result.data[1] = column_1.data[0]; result.data[2] = column_2.data[0]; result.data[3] = column_0.data[1]; result.data[4] = column_1.data[1]; result.data[5] = column_2.data[1]; result.data[6] = column_0.data[2]; result.data[7] = column_1.data[2]; result.data[8] = column_2.data[2]; result.data[9] = column_0.data[3]; result.data[10] = column_1.data[3]; result.data[11] = column_2.data[3]; return result; } /// Constructs a matrix from a uniform element CUTLASS_HOST_DEVICE static Matrix uniform(Element s) { Matrix m; m.data[0] = s; m.data[1] = s; m.data[2] = s; m.data[3] = s; m.data[4] = s; m.data[5] = s; m.data[6] = s; m.data[7] = s; m.data[8] = s; m.data[9] = s; m.data[10] = s; m.data[11] = s; return m; } /// Constructs a matrix from a uniform element 1 CUTLASS_HOST_DEVICE static Matrix ones() { return uniform(Element(1)); } /// Constructs a matrix from a uniform element 0 CUTLASS_HOST_DEVICE static Matrix zero() { return Matrix(); } /// Constructs a matrix from elements along its diagonal CUTLASS_HOST_DEVICE static Matrix from_diagonal(Matrix<Element, 3, 1> const &diag) { Matrix m; m.data[0] = diag.data[0]; m.data[4] = diag.data[1]; m.data[8] = diag.data[2]; return m; } /// Constructs a matrix from elements along its diagonal CUTLASS_HOST_DEVICE static Matrix from_diagonal(Matrix<Element, 1, 3> const &diag) { Matrix m; m.data[0] = diag.data[0]; m.data[4] = diag.data[1]; m.data[8] = diag.data[2]; return m; } /// Gets an array of diagonal elements CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> diagonal() const { Matrix<Element, 3, 1> diag; diag.data[0] = data[0]; diag.data[1] = data[4]; diag.data[2] = data[8]; return diag; } /// Returns a transposed matrix CUTLASS_HOST_DEVICE Matrix<Element, 3, 4> transpose() const { Matrix<Element, 3, 4> mt; mt.data[0] = data[0]; mt.data[4] = data[1]; mt.data[8] =
data[2]; mt.data[1] = data[3]; mt.data[5] = data[4]; mt.data[9] = data[5]; mt.data[2] = data[6]; mt.data[6] = data[7]; mt.data[10] = data[8]; mt.data[3] = data[9]; mt.data[7] = data[10]; mt.data[11] = data[11]; return mt; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(int i, int j) const { return data[i * 3 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(int i, int j) { return data[i * 3 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element &at(int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element at(int offset) const { return data[offset]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element operator[](Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & operator[](Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element & operator[](int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element operator[](int offset) const { return data[offset]; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const { Matrix<Element, 1, 2> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 1]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 1] = m.data[1]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const { Matrix<Element, 1, 3> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 1]; m.data[2] = data[i * 3 + j + 2]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 1] = m.data[1]; data[i * 3 + j + 2] = m.data[2]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 1, 3> row(int i) const { return slice_1x3(i, 0); } CUTLASS_HOST_DEVICE Matrix &set_row(Matrix<Element, 1, 3> const &v, int i = 0) { return set_slice_1x3(v, i, 0); } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const { Matrix<Element, 2, 1> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 3]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 3] = m.data[1]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const { Matrix<Element, 2, 2> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 1]; m.data[2] = data[i * 3 + j + 3]; m.data[3] = data[i * 3 + j + 4]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 +
j + 1] = m.data[1]; data[i * 3 + j + 3] = m.data[2]; data[i * 3 + j + 4] = m.data[3]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 3> slice_2x3(int i = 0, int j = 0) const { Matrix<Element, 2, 3> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 1]; m.data[2] = data[i * 3 + j + 2]; m.data[3] = data[i * 3 + j + 3]; m.data[4] = data[i * 3 + j + 4]; m.data[5] = data[i * 3 + j + 5]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x3(Matrix<Element, 2, 3> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 1] = m.data[1]; data[i * 3 + j + 2] = m.data[2]; data[i * 3 + j + 3] = m.data[3]; data[i * 3 + j + 4] = m.data[4]; data[i * 3 + j + 5] = m.data[5]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const { Matrix<Element, 3, 1> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 3]; m.data[2] = data[i * 3 + j + 6]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 3] = m.data[1]; data[i * 3 + j + 6] = m.data[2]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 3, 2> slice_3x2(int i = 0, int j = 0) const { Matrix<Element, 3, 2> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 1]; m.data[2] = data[i * 3 + j + 3]; m.data[3] = data[i * 3 + j + 4]; m.data[4] = data[i * 3 + j + 6]; m.data[5] = data[i * 3 + j + 7]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_3x2(Matrix<Element, 3, 2> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 1] = m.data[1]; data[i * 3 + j + 3] = m.data[2]; data[i * 3 + j + 4] = m.data[3]; data[i * 3 + j + 6] = m.data[4]; data[i * 3 + j + 7] = m.data[5]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 3, 3> slice_3x3(int i = 0, int j = 0) const { Matrix<Element, 3, 3> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 1]; m.data[2] = data[i * 3 + j + 2]; m.data[3] = data[i * 3 + j + 3]; m.data[4] = data[i * 3 + j + 4]; m.data[5] = data[i * 3 + j + 5]; m.data[6] = data[i * 3 + j + 6]; m.data[7] = data[i * 3 + j + 7]; m.data[8] = data[i * 3 + j + 8]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_3x3(Matrix<Element, 3, 3> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 1] = m.data[1]; data[i * 3 + j + 2] = m.data[2]; data[i * 3 + j + 3] = m.data[3]; data[i * 3 + j + 4] = m.data[4]; data[i * 3 + j + 5] = m.data[5]; data[i * 3 + j + 6] = m.data[6]; data[i * 3 + j + 7] = m.data[7]; data[i * 3 + j + 8] = m.data[8]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 4, 1> slice_4x1(int i = 0, int j = 0) const { Matrix<Element, 4, 1> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 3]; m.data[2] = data[i * 3 + j + 6]; m.data[3] = data[i * 3 + j + 9]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_4x1(Matrix<Element, 4, 1> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 3] = m.data[1]; data[i * 3 + j + 6] = m.data[2]; data[i * 3 + j + 9] = m.data[3]; 
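// The slice_MxN / set_slice_MxN accessors of this 4-by-3 specialization all address elements as
// data[i * 3 + j + (r * 3 + c)], i.e. the row stride is kColumns = 3. A minimal usage sketch
// (illustrative only; it assumes the smaller Matrix specializations defined earlier in this file
// follow the same generated pattern):
//
//   cutlass::Matrix<float, 4, 3> A = cutlass::Matrix<float, 4, 3>::ones();
//   cutlass::Matrix<float, 2, 2> B = A.slice_2x2(1, 1);   // rows 1..2, columns 1..2 of A
//   A.set_slice_2x2(B, 0, 0);                             // write the same block back at (0, 0)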
return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 4, 1> column(int j) const { return slice_4x1(0, j); } CUTLASS_HOST_DEVICE Matrix &set_column(Matrix<Element, 4, 1> const &v, int j = 0) { return set_slice_4x1(v, 0, j); } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 4, 2> slice_4x2(int i = 0, int j = 0) const { Matrix<Element, 4, 2> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 1]; m.data[2] = data[i * 3 + j + 3]; m.data[3] = data[i * 3 + j + 4]; m.data[4] = data[i * 3 + j + 6]; m.data[5] = data[i * 3 + j + 7]; m.data[6] = data[i * 3 + j + 9]; m.data[7] = data[i * 3 + j + 10]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_4x2(Matrix<Element, 4, 2> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 1] = m.data[1]; data[i * 3 + j + 3] = m.data[2]; data[i * 3 + j + 4] = m.data[3]; data[i * 3 + j + 6] = m.data[4]; data[i * 3 + j + 7] = m.data[5]; data[i * 3 + j + 9] = m.data[6]; data[i * 3 + j + 10] = m.data[7]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 4, 3> slice_4x3(int i = 0, int j = 0) const { Matrix<Element, 4, 3> m; m.data[0] = data[i * 3 + j + 0]; m.data[1] = data[i * 3 + j + 1]; m.data[2] = data[i * 3 + j + 2]; m.data[3] = data[i * 3 + j + 3]; m.data[4] = data[i * 3 + j + 4]; m.data[5] = data[i * 3 + j + 5]; m.data[6] = data[i * 3 + j + 6]; m.data[7] = data[i * 3 + j + 7]; m.data[8] = data[i * 3 + j + 8]; m.data[9] = data[i * 3 + j + 9]; m.data[10] = data[i * 3 + j + 10]; m.data[11] = data[i * 3 + j + 11]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_4x3(Matrix<Element, 4, 3> const &m, int i = 0, int j = 0) { data[i * 3 + j + 0] = m.data[0]; data[i * 3 + j + 1] = m.data[1]; data[i * 3 + j + 2] = m.data[2]; data[i * 3 + j + 3] = m.data[3]; data[i * 3 + j + 4] = m.data[4]; data[i * 3 + j + 5] = m.data[5]; data[i * 3 + j + 6] = m.data[6]; data[i * 3 + j + 7] = m.data[7]; data[i * 3 + j + 8] = m.data[8]; data[i * 3 + j + 9] = m.data[9]; data[i * 3 + j + 10] = m.data[10]; data[i * 3 + j + 11] = m.data[11]; return *this; } /// Forms a 4-by-3 matrix by horizontally concatenating a 4-by-1 matrix with a 4-by-2 matrix CUTLASS_HOST_DEVICE static Matrix hcat(Matrix<Element, 4, 1> const & lhs, Matrix<Element, 4, 2> const & rhs) { return Matrix( lhs.at(0, 0), rhs.at(0, 0), rhs.at(0, 1) , lhs.at(1, 0), rhs.at(1, 0), rhs.at(1, 1) , lhs.at(2, 0), rhs.at(2, 0), rhs.at(2, 1) , lhs.at(3, 0), rhs.at(3, 0), rhs.at(3, 1)); } /// Forms a 4-by-3 matrix by horizontally concatenating a 4-by-2 matrix with a 4-by-1 matrix CUTLASS_HOST_DEVICE static Matrix hcat(Matrix<Element, 4, 2> const & lhs, Matrix<Element, 4, 1> const & rhs) { return Matrix( lhs.at(0, 0), lhs.at(0, 1), rhs.at(0, 0) , lhs.at(1, 0), lhs.at(1, 1), rhs.at(1, 0) , lhs.at(2, 0), lhs.at(2, 1), rhs.at(2, 0) , lhs.at(3, 0), lhs.at(3, 1), rhs.at(3, 0)); } /// Concatenates this matrix with a 4-by-1 matrix to form a 4-by-4 matrix CUTLASS_HOST_DEVICE Matrix<Element, 4, 4> hcat(Matrix<Element, 4, 1> const & rhs) const { return Matrix<Element, 4, 4>::hcat(*this, rhs); } /// Forms a 4-by-3 matrix by vertically concatenating a 1-by-3 matrix with a 3-by-3 matrix CUTLASS_HOST_DEVICE static Matrix vcat(Matrix<Element, 1, 3> const & upper, Matrix<Element, 3, 3> const & lower) { return Matrix( upper.at(0, 0), upper.at(0, 1), upper.at(0, 2) , lower.at(0, 0), lower.at(0, 1), lower.at(0, 2) , lower.at(1, 0), lower.at(1, 1),
lower.at(1, 2) , lower.at(2, 0), lower.at(2, 1), lower.at(2, 2)); } /// Forms a 4-by-3 matrix by vertically concatenating a 2-by-3 matrix with a 2-by-3 matrix CUTLASS_HOST_DEVICE static Matrix vcat(Matrix<Element, 2, 3> const & upper, Matrix<Element, 2, 3> const & lower) { return Matrix( upper.at(0, 0), upper.at(0, 1), upper.at(0, 2) , upper.at(1, 0), upper.at(1, 1), upper.at(1, 2) , lower.at(0, 0), lower.at(0, 1), lower.at(0, 2) , lower.at(1, 0), lower.at(1, 1), lower.at(1, 2)); } /// Forms a 4-by-3 matrix by vertically concatenating a 3-by-3 matrix with a 1-by-3 matrix CUTLASS_HOST_DEVICE static Matrix vcat(Matrix<Element, 3, 3> const & upper, Matrix<Element, 1, 3> const & lower) { return Matrix( upper.at(0, 0), upper.at(0, 1), upper.at(0, 2) , upper.at(1, 0), upper.at(1, 1), upper.at(1, 2) , upper.at(2, 0), upper.at(2, 1), upper.at(2, 2) , lower.at(0, 0), lower.at(0, 1), lower.at(0, 2)); } /// Forms a 4-by-3 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Element A, Matrix<Element, 1, 2> const & B, Matrix<Element, 3, 1> const & C, Matrix<Element, 3, 2> const & D) { return Matrix( A, B.at(0, 0), B.at(0, 1) , C.at(0, 0), D.at(0, 0), D.at(0, 1) , C.at(1, 0), D.at(1, 0), D.at(1, 1) , C.at(2, 0), D.at(2, 0), D.at(2, 1) ); } /// Forms a 4-by-3 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 1, 2> const & A, Element B, Matrix<Element, 3, 2> const & C, Matrix<Element, 3, 1> const & D) { return Matrix( A.at(0, 0), A.at(0, 1), B , C.at(0, 0), C.at(0, 1), D.at(0, 0) , C.at(1, 0), C.at(1, 1), D.at(1, 0) , C.at(2, 0), C.at(2, 1), D.at(2, 0) ); } /// Forms a 4-by-3 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 2, 1> const & A, Matrix<Element, 2, 2> const & B, Matrix<Element, 2, 1> const & C, Matrix<Element, 2, 2> const & D) { return Matrix( A.at(0, 0), B.at(0, 0), B.at(0, 1) , A.at(1, 0), B.at(1, 0), B.at(1, 1) , C.at(0, 0), D.at(0, 0), D.at(0, 1) , C.at(1, 0), D.at(1, 0), D.at(1, 1) ); } /// Forms a 4-by-3 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 2, 2> const & A, Matrix<Element, 2, 1> const & B, Matrix<Element, 2, 2> const & C, Matrix<Element, 2, 1> const & D) { return Matrix( A.at(0, 0), A.at(0, 1), B.at(0, 0) , A.at(1, 0), A.at(1, 1), B.at(1, 0) , C.at(0, 0), C.at(0, 1), D.at(0, 0) , C.at(1, 0), C.at(1, 1), D.at(1, 0) ); } /// Forms a 4-by-3 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 3, 1> const & A, Matrix<Element, 3, 2> const & B, Element C, Matrix<Element, 1, 2> const & D) { return Matrix( A.at(0, 0), B.at(0, 0), B.at(0, 1) , A.at(1, 0), B.at(1, 0), B.at(1, 1) , A.at(2, 0), B.at(2, 0), B.at(2, 1) , C, D.at(0, 0), D.at(0, 1) ); } /// Forms a 4-by-3 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 3, 2> const & A, Matrix<Element, 3, 1> const & B, Matrix<Element, 1, 2> const & C, Element D) { return Matrix( A.at(0, 0), A.at(0, 1), B.at(0, 0) , A.at(1, 0), A.at(1, 1), B.at(1, 0) , A.at(2, 0), A.at(2, 1), B.at(2, 0) , C.at(0, 0), C.at(0, 1), D ); } /// Elementwise add operator (4-by-3) CUTLASS_HOST_DEVICE Matrix add(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] + rhs.data[0]; result.data[1] = data[1] + rhs.data[1]; result.data[2] = data[2] + rhs.data[2]; result.data[3] = data[3] + rhs.data[3]; result.data[4] = data[4] + rhs.data[4]; result.data[5] = data[5] + rhs.data[5]; result.data[6] = 
data[6] + rhs.data[6]; result.data[7] = data[7] + rhs.data[7]; result.data[8] = data[8] + rhs.data[8]; result.data[9] = data[9] + rhs.data[9]; result.data[10] = data[10] + rhs.data[10]; result.data[11] = data[11] + rhs.data[11]; return result; } /// Elementwise add operator (4-by-3) CUTLASS_HOST_DEVICE Matrix operator +(Matrix const &rhs) const { return add(rhs); } /// Elementwise add operator (4-by-3) CUTLASS_HOST_DEVICE Matrix & operator +=(Matrix const &rhs) { data[0] += rhs.data[0]; data[1] += rhs.data[1]; data[2] += rhs.data[2]; data[3] += rhs.data[3]; data[4] += rhs.data[4]; data[5] += rhs.data[5]; data[6] += rhs.data[6]; data[7] += rhs.data[7]; data[8] += rhs.data[8]; data[9] += rhs.data[9]; data[10] += rhs.data[10]; data[11] += rhs.data[11]; return *this; } /// Elementwise subtract operator (4-by-3) CUTLASS_HOST_DEVICE Matrix subtract(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] - rhs.data[0]; result.data[1] = data[1] - rhs.data[1]; result.data[2] = data[2] - rhs.data[2]; result.data[3] = data[3] - rhs.data[3]; result.data[4] = data[4] - rhs.data[4]; result.data[5] = data[5] - rhs.data[5]; result.data[6] = data[6] - rhs.data[6]; result.data[7] = data[7] - rhs.data[7]; result.data[8] = data[8] - rhs.data[8]; result.data[9] = data[9] - rhs.data[9]; result.data[10] = data[10] - rhs.data[10]; result.data[11] = data[11] - rhs.data[11]; return result; } /// Elementwise subtract operator (4-by-3) CUTLASS_HOST_DEVICE Matrix operator -(Matrix const &rhs) const { return subtract(rhs); } /// Elementwise subtract operator (4-by-3) CUTLASS_HOST_DEVICE Matrix & operator -=(Matrix const &rhs) { data[0] -= rhs.data[0]; data[1] -= rhs.data[1]; data[2] -= rhs.data[2]; data[3] -= rhs.data[3]; data[4] -= rhs.data[4]; data[5] -= rhs.data[5]; data[6] -= rhs.data[6]; data[7] -= rhs.data[7]; data[8] -= rhs.data[8]; data[9] -= rhs.data[9]; data[10] -= rhs.data[10]; data[11] -= rhs.data[11]; return *this; } /// Elementwise multiply operator (4-by-3) CUTLASS_HOST_DEVICE Matrix multiply(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] * rhs.data[0]; result.data[1] = data[1] * rhs.data[1]; result.data[2] = data[2] * rhs.data[2]; result.data[3] = data[3] * rhs.data[3]; result.data[4] = data[4] * rhs.data[4]; result.data[5] = data[5] * rhs.data[5]; result.data[6] = data[6] * rhs.data[6]; result.data[7] = data[7] * rhs.data[7]; result.data[8] = data[8] * rhs.data[8]; result.data[9] = data[9] * rhs.data[9]; result.data[10] = data[10] * rhs.data[10]; result.data[11] = data[11] * rhs.data[11]; return result; } /// Scalar multiply operator (4-by-3) CUTLASS_HOST_DEVICE Matrix multiply(Element const &s) const { Matrix result; result.data[0] = data[0] * s; result.data[1] = data[1] * s; result.data[2] = data[2] * s; result.data[3] = data[3] * s; result.data[4] = data[4] * s; result.data[5] = data[5] * s; result.data[6] = data[6] * s; result.data[7] = data[7] * s; result.data[8] = data[8] * s; result.data[9] = data[9] * s; result.data[10] = data[10] * s; result.data[11] = data[11] * s; return result; } /// Scalar multiply operator (4-by-3) CUTLASS_HOST_DEVICE Matrix operator *(Element const &s) const { return multiply(s); } /// Scalar multiply operator (4-by-3) CUTLASS_HOST_DEVICE Matrix & operator *=(Element const &s) { data[0] *= s; data[1] *= s; data[2] *= s; data[3] *= s; data[4] *= s; data[5] *= s; data[6] *= s; data[7] *= s; data[8] *= s; data[9] *= s; data[10] *= s; data[11] *= s; return *this; } /// Elementwise divide operator (4-by-3) CUTLASS_HOST_DEVICE Matrix 
divide(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] / rhs.data[0]; result.data[1] = data[1] / rhs.data[1]; result.data[2] = data[2] / rhs.data[2]; result.data[3] = data[3] / rhs.data[3]; result.data[4] = data[4] / rhs.data[4]; result.data[5] = data[5] / rhs.data[5]; result.data[6] = data[6] / rhs.data[6]; result.data[7] = data[7] / rhs.data[7]; result.data[8] = data[8] / rhs.data[8]; result.data[9] = data[9] / rhs.data[9]; result.data[10] = data[10] / rhs.data[10]; result.data[11] = data[11] / rhs.data[11]; return result; } /// Scalar divide operator (4-by-3) CUTLASS_HOST_DEVICE Matrix divide(Element const &s) const { Matrix result; result.data[0] = data[0] / s; result.data[1] = data[1] / s; result.data[2] = data[2] / s; result.data[3] = data[3] / s; result.data[4] = data[4] / s; result.data[5] = data[5] / s; result.data[6] = data[6] / s; result.data[7] = data[7] / s; result.data[8] = data[8] / s; result.data[9] = data[9] / s; result.data[10] = data[10] / s; result.data[11] = data[11] / s; return result; } /// Scalar divide operator (4-by-3) CUTLASS_HOST_DEVICE Matrix operator /(Element const &s) const { return divide(s); } /// Scalar divide operator (4-by-3) CUTLASS_HOST_DEVICE Matrix & operator /=(Element const &s) { data[0] /= s; data[1] /= s; data[2] /= s; data[3] /= s; data[4] /= s; data[5] /= s; data[6] /= s; data[7] /= s; data[8] /= s; data[9] /= s; data[10] /= s; data[11] /= s; return *this; } /// Elementwise divide operator (4-by-3) CUTLASS_HOST_DEVICE Matrix operator /(Matrix const &rhs) const { return divide(rhs); } /// Elementwise divide operator (4-by-3) CUTLASS_HOST_DEVICE Matrix & operator /=(Matrix const &rhs) { data[0] /= rhs.data[0]; data[1] /= rhs.data[1]; data[2] /= rhs.data[2]; data[3] /= rhs.data[3]; data[4] /= rhs.data[4]; data[5] /= rhs.data[5]; data[6] /= rhs.data[6]; data[7] /= rhs.data[7]; data[8] /= rhs.data[8]; data[9] /= rhs.data[9]; data[10] /= rhs.data[10]; data[11] /= rhs.data[11]; return *this; } /// Negates each element of the matrix CUTLASS_HOST_DEVICE Matrix operator-() const { Matrix m; m.data[0] = -data[0]; m.data[1] = -data[1]; m.data[2] = -data[2]; m.data[3] = -data[3]; m.data[4] = -data[4]; m.data[5] = -data[5]; m.data[6] = -data[6]; m.data[7] = -data[7]; m.data[8] = -data[8]; m.data[9] = -data[9]; m.data[10] = -data[10]; m.data[11] = -data[11]; return m; } /// Matrix product of size 4-by-1-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 4, 1> product( Matrix<Element, 3, 1> const &rhs, Matrix<Element, 4, 1> accum = Matrix<Element, 4, 1>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[3] * rhs.data[0]; accum.data[2] += data[6] * rhs.data[0]; accum.data[3] += data[9] * rhs.data[0]; // k=1 accum.data[0] += data[1] * rhs.data[1]; accum.data[1] += data[4] * rhs.data[1]; accum.data[2] += data[7] * rhs.data[1]; accum.data[3] += data[10] * rhs.data[1]; // k=2 accum.data[0] += data[2] * rhs.data[2]; accum.data[1] += data[5] * rhs.data[2]; accum.data[2] += data[8] * rhs.data[2]; accum.data[3] += data[11] * rhs.data[2]; return accum; } /// Matrix product of size 4-by-1-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 4, 1> operator*(Matrix<Element, 3, 1> const &rhs) const { return product(rhs); } /// Matrix product of size 4-by-2-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 4, 2> product( Matrix<Element, 3, 2> const &rhs, Matrix<Element, 4, 2> accum = Matrix<Element, 4, 2>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[3] *
rhs.data[0]; accum.data[3] += data[3] * rhs.data[1]; accum.data[4] += data[6] * rhs.data[0]; accum.data[5] += data[6] * rhs.data[1]; accum.data[6] += data[9] * rhs.data[0]; accum.data[7] += data[9] * rhs.data[1]; // k=1 accum.data[0] += data[1] * rhs.data[2]; accum.data[1] += data[1] * rhs.data[3]; accum.data[2] += data[4] * rhs.data[2]; accum.data[3] += data[4] * rhs.data[3]; accum.data[4] += data[7] * rhs.data[2]; accum.data[5] += data[7] * rhs.data[3]; accum.data[6] += data[10] * rhs.data[2]; accum.data[7] += data[10] * rhs.data[3]; // k=2 accum.data[0] += data[2] * rhs.data[4]; accum.data[1] += data[2] * rhs.data[5]; accum.data[2] += data[5] * rhs.data[4]; accum.data[3] += data[5] * rhs.data[5]; accum.data[4] += data[8] * rhs.data[4]; accum.data[5] += data[8] * rhs.data[5]; accum.data[6] += data[11] * rhs.data[4]; accum.data[7] += data[11] * rhs.data[5]; return accum; } /// Matrix product of size 4-by-2-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 4, 2> operator*(Matrix<Element, 3, 2> const &rhs) const { return product(rhs); } /// Matrix product of size 4-by-3-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 4, 3> product( Matrix<Element, 3, 3> const &rhs, Matrix<Element, 4, 3> accum = Matrix<Element, 4, 3>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[3] * rhs.data[0]; accum.data[4] += data[3] * rhs.data[1]; accum.data[5] += data[3] * rhs.data[2]; accum.data[6] += data[6] * rhs.data[0]; accum.data[7] += data[6] * rhs.data[1]; accum.data[8] += data[6] * rhs.data[2]; accum.data[9] += data[9] * rhs.data[0]; accum.data[10] += data[9] * rhs.data[1]; accum.data[11] += data[9] * rhs.data[2]; // k=1 accum.data[0] += data[1] * rhs.data[3]; accum.data[1] += data[1] * rhs.data[4]; accum.data[2] += data[1] * rhs.data[5]; accum.data[3] += data[4] * rhs.data[3]; accum.data[4] += data[4] * rhs.data[4]; accum.data[5] += data[4] * rhs.data[5]; accum.data[6] += data[7] * rhs.data[3]; accum.data[7] += data[7] * rhs.data[4]; accum.data[8] += data[7] * rhs.data[5]; accum.data[9] += data[10] * rhs.data[3]; accum.data[10] += data[10] * rhs.data[4]; accum.data[11] += data[10] * rhs.data[5]; // k=2 accum.data[0] += data[2] * rhs.data[6]; accum.data[1] += data[2] * rhs.data[7]; accum.data[2] += data[2] * rhs.data[8]; accum.data[3] += data[5] * rhs.data[6]; accum.data[4] += data[5] * rhs.data[7]; accum.data[5] += data[5] * rhs.data[8]; accum.data[6] += data[8] * rhs.data[6]; accum.data[7] += data[8] * rhs.data[7]; accum.data[8] += data[8] * rhs.data[8]; accum.data[9] += data[11] * rhs.data[6]; accum.data[10] += data[11] * rhs.data[7]; accum.data[11] += data[11] * rhs.data[8]; return accum; } /// Matrix product of size 4-by-3-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 4, 3> operator*(Matrix<Element, 3, 3> const &rhs) const { return product(rhs); } /// Matrix product of size 4-by-3-by-3 CUTLASS_HOST_DEVICE Matrix & operator*=(Matrix<Element, 3, 3> const &rhs) { *this = product(rhs); return *this; } /// Matrix product of size 4-by-4-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 4, 4> product( Matrix<Element, 3, 4> const &rhs, Matrix<Element, 4, 4> accum = Matrix<Element, 4, 4>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[0] * rhs.data[3]; accum.data[4] += data[3] * rhs.data[0]; accum.data[5] += data[3] * rhs.data[1]; accum.data[6] += data[3] * rhs.data[2]; accum.data[7] += data[3] * rhs.data[3]; 
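// Each product() in this specialization unrolls C += A * B as a sum over k of rank-1 updates: for
// every k, column k of this matrix (elements k, k + 3, k + 6, k + 9 of 'data') scales row k of 'rhs'.
// Because 'accum' is passed by value and returned, results can be accumulated across calls. A minimal
// sketch (illustrative only; it assumes the 3-by-4 specialization generated elsewhere in this file):
//
//   cutlass::Matrix<float, 4, 3> A;
//   cutlass::Matrix<float, 3, 4> B;
//   cutlass::Matrix<float, 4, 4> C;        // default constructor zero-initializes
//   C = A.product(B, C);                   // C += A * B
//   C = A.product(B, C);                   // accumulate a second GEMM-style update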
accum.data[8] += data[6] * rhs.data[0]; accum.data[9] += data[6] * rhs.data[1]; accum.data[10] += data[6] * rhs.data[2]; accum.data[11] += data[6] * rhs.data[3]; accum.data[12] += data[9] * rhs.data[0]; accum.data[13] += data[9] * rhs.data[1]; accum.data[14] += data[9] * rhs.data[2]; accum.data[15] += data[9] * rhs.data[3]; // k=1 accum.data[0] += data[1] * rhs.data[4]; accum.data[1] += data[1] * rhs.data[5]; accum.data[2] += data[1] * rhs.data[6]; accum.data[3] += data[1] * rhs.data[7]; accum.data[4] += data[4] * rhs.data[4]; accum.data[5] += data[4] * rhs.data[5]; accum.data[6] += data[4] * rhs.data[6]; accum.data[7] += data[4] * rhs.data[7]; accum.data[8] += data[7] * rhs.data[4]; accum.data[9] += data[7] * rhs.data[5]; accum.data[10] += data[7] * rhs.data[6]; accum.data[11] += data[7] * rhs.data[7]; accum.data[12] += data[10] * rhs.data[4]; accum.data[13] += data[10] * rhs.data[5]; accum.data[14] += data[10] * rhs.data[6]; accum.data[15] += data[10] * rhs.data[7]; // k=2 accum.data[0] += data[2] * rhs.data[8]; accum.data[1] += data[2] * rhs.data[9]; accum.data[2] += data[2] * rhs.data[10]; accum.data[3] += data[2] * rhs.data[11]; accum.data[4] += data[5] * rhs.data[8]; accum.data[5] += data[5] * rhs.data[9]; accum.data[6] += data[5] * rhs.data[10]; accum.data[7] += data[5] * rhs.data[11]; accum.data[8] += data[8] * rhs.data[8]; accum.data[9] += data[8] * rhs.data[9]; accum.data[10] += data[8] * rhs.data[10]; accum.data[11] += data[8] * rhs.data[11]; accum.data[12] += data[11] * rhs.data[8]; accum.data[13] += data[11] * rhs.data[9]; accum.data[14] += data[11] * rhs.data[10]; accum.data[15] += data[11] * rhs.data[11]; return accum; } /// Matrix product of size 4-by-4-by-3 CUTLASS_HOST_DEVICE Matrix<Element, 4, 4> operator*(Matrix<Element, 3, 4> const &rhs) const { return product(rhs); } /// Returns the sum of elements CUTLASS_HOST_DEVICE Element sum(Element accum = Element()) const { accum += data[0]; accum += data[1]; accum += data[2]; accum += data[3]; accum += data[4]; accum += data[5]; accum += data[6]; accum += data[7]; accum += data[8]; accum += data[9]; accum += data[10]; accum += data[11]; return accum; } /// Returns the sum of squared elements CUTLASS_HOST_DEVICE Element norm(Element accum = Element()) const { accum += data[0] * data[0]; accum += data[1] * data[1]; accum += data[2] * data[2]; accum += data[3] * data[3]; accum += data[4] * data[4]; accum += data[5] * data[5]; accum += data[6] * data[6]; accum += data[7] * data[7]; accum += data[8] * data[8]; accum += data[9] * data[9]; accum += data[10] * data[10]; accum += data[11] * data[11]; return accum; } /// Returns square root of the norm CUTLASS_HOST_DEVICE Element magnitude() const { return fast_sqrt(norm()); } /// Returns the sum of diagonal elements CUTLASS_HOST_DEVICE Element trace(Element accum = Element()) const { accum += data[0]; accum += data[4]; accum += data[8]; return accum; } }; /// Template alias for 4-by-3 matrix template <typename Element> using Matrix4x3 = Matrix<Element, 4, 3>; /// Free function to infer element type from template arguments template <typename Element> CUTLASS_HOST_DEVICE Matrix4x3<Element> make_Matrix4x3( Element _0_0, Element _0_1, Element _0_2, Element _1_0, Element _1_1, Element _1_2, Element _2_0, Element _2_1, Element _2_2, Element _3_0, Element _3_1, Element _3_2 ) { return Matrix4x3<Element>( _0_0, _0_1, _0_2, _1_0, _1_1, _1_2, _2_0, _2_1, _2_2, _3_0, _3_1, _3_2 ); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// 4-by-4 matrix
template class definition template <typename Element_> struct Matrix<Element_, 4, 4> { // // Type definitions // /// Element data type using Element = Element_; /// Number of rows in matrix static int const kRows = 4; /// Number of columns in matrix static int const kColumns = 4; /// Layout of matrix in underlying array using Layout = layout::RowMajor; /// Number of elements in matrix static int const kCount = 16; // // Data members // /// Elements of the matrix in row-major layout Array<Element, kCount> data; // // Methods // /// Constructs a zero matrix CUTLASS_HOST_DEVICE Matrix() { data.clear(); } /// Copy constructor for a 4-by-4 matrix CUTLASS_HOST_DEVICE Matrix(Matrix const &rhs) { data = rhs.data; } /// Constructs a 4-by-4 matrix from scalar elements CUTLASS_HOST_DEVICE Matrix( Element _0_0, Element _0_1, Element _0_2, Element _0_3, Element _1_0, Element _1_1, Element _1_2, Element _1_3, Element _2_0, Element _2_1, Element _2_2, Element _2_3, Element _3_0, Element _3_1, Element _3_2, Element _3_3 ) { data[0] = _0_0; data[1] = _0_1; data[2] = _0_2; data[3] = _0_3; data[4] = _1_0; data[5] = _1_1; data[6] = _1_2; data[7] = _1_3; data[8] = _2_0; data[9] = _2_1; data[10] = _2_2; data[11] = _2_3; data[12] = _3_0; data[13] = _3_1; data[14] = _3_2; data[15] = _3_3; } /// Constructs a 4-by-4 matrix from row vectors CUTLASS_HOST_DEVICE Matrix( Matrix<Element, 1, 4> const &row_0, Matrix<Element, 1, 4> const &row_1, Matrix<Element, 1, 4> const &row_2, Matrix<Element, 1, 4> const &row_3 ) { data[0] = row_0.data[0]; data[1] = row_0.data[1]; data[2] = row_0.data[2]; data[3] = row_0.data[3]; data[4] = row_1.data[0]; data[5] = row_1.data[1]; data[6] = row_1.data[2]; data[7] = row_1.data[3]; data[8] = row_2.data[0]; data[9] = row_2.data[1]; data[10] = row_2.data[2]; data[11] = row_2.data[3]; data[12] = row_3.data[0]; data[13] = row_3.data[1]; data[14] = row_3.data[2]; data[15] = row_3.data[3]; } /// Static method to construct a 4-by-4 matrix from column vectors CUTLASS_HOST_DEVICE static Matrix from_columns( Matrix<Element, 4, 1> const &column_0, Matrix<Element, 4, 1> const &column_1, Matrix<Element, 4, 1> const &column_2, Matrix<Element, 4, 1> const &column_3 ) { Matrix result; result.data[0] = column_0.data[0]; result.data[1] = column_1.data[0]; result.data[2] = column_2.data[0]; result.data[3] = column_3.data[0]; result.data[4] = column_0.data[1]; result.data[5] = column_1.data[1]; result.data[6] = column_2.data[1]; result.data[7] = column_3.data[1]; result.data[8] = column_0.data[2]; result.data[9] = column_1.data[2]; result.data[10] = column_2.data[2]; result.data[11] = column_3.data[2]; result.data[12] = column_0.data[3]; result.data[13] = column_1.data[3]; result.data[14] = column_2.data[3]; result.data[15] = column_3.data[3]; return result; } /// Constructs an identity matrix CUTLASS_HOST_DEVICE static Matrix identity() { Matrix m; m.data[0] = Element(1); m.data[5] = Element(1); m.data[10] = Element(1); m.data[15] = Element(1); return m; } /// Constructs a matrix from a uniform element CUTLASS_HOST_DEVICE static Matrix uniform(Element s) { Matrix m; m.data[0] = s; m.data[1] = s; m.data[2] = s; m.data[3] = s; m.data[4] = s; m.data[5] = s; m.data[6] = s; m.data[7] = s; m.data[8] = s; m.data[9] = s; m.data[10] = s; m.data[11] = s; m.data[12] = s; m.data[13] = s; m.data[14] = s; m.data[15] = s; return m; } /// Constructs a matrix from a uniform element 1 CUTLASS_HOST_DEVICE static Matrix ones() { return uniform(Element(1)); } /// Constructs a matrix from a uniform element 0 CUTLASS_HOST_DEVICE
static Matrix zero() { return Matrix(); } /// Constructs a matrix from elements along its diagonal CUTLASS_HOST_DEVICE static Matrix from_diagonal(Matrix<Element, 4, 1> const &diag) { Matrix m; m.data[0] = diag.data[0]; m.data[5] = diag.data[1]; m.data[10] = diag.data[2]; m.data[15] = diag.data[3]; return m; } /// Constructs a matrix from elements along its diagonal CUTLASS_HOST_DEVICE static Matrix from_diagonal(Matrix<Element, 1, 4> const &diag) { Matrix m; m.data[0] = diag.data[0]; m.data[5] = diag.data[1]; m.data[10] = diag.data[2]; m.data[15] = diag.data[3]; return m; } /// Gets an array of diagonal elements CUTLASS_HOST_DEVICE Matrix<Element, 4, 1> diagonal() const { Matrix<Element, 4, 1> diag; diag.data[0] = data[0]; diag.data[1] = data[5]; diag.data[2] = data[10]; diag.data[3] = data[15]; return diag; } /// Returns a transposed matrix CUTLASS_HOST_DEVICE Matrix<Element, 4, 4> transpose() const { Matrix<Element, 4, 4> mt; mt.data[0] = data[0]; mt.data[4] = data[1]; mt.data[8] = data[2]; mt.data[12] = data[3]; mt.data[1] = data[4]; mt.data[5] = data[5]; mt.data[9] = data[6]; mt.data[13] = data[7]; mt.data[2] = data[8]; mt.data[6] = data[9]; mt.data[10] = data[10]; mt.data[14] = data[11]; mt.data[3] = data[12]; mt.data[7] = data[13]; mt.data[11] = data[14]; mt.data[15] = data[15]; return mt; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(int i, int j) const { return data[i * 4 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(int i, int j) { return data[i * 4 + j]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element at(Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & at(Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element &at(int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element at(int offset) const { return data[offset]; } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element operator[](Coord<2> const &coord) const { return at(coord[0], coord[1]); } /// Accesses an element by coordinate CUTLASS_HOST_DEVICE Element & operator[](Coord<2> const &coord) { return at(coord[0], coord[1]); } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element & operator[](int offset) { return data[offset]; } /// Accesses an element by offset CUTLASS_HOST_DEVICE Element operator[](int offset) const { return data[offset]; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const { Matrix<Element, 1, 2> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const { Matrix<Element, 1, 3> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 2]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 2] = m.data[2]; return *this; } /// Gets a submatrix with optional offset 
CUTLASS_HOST_DEVICE Matrix<Element, 1, 4> slice_1x4(int i = 0, int j = 0) const { Matrix<Element, 1, 4> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 2]; m.data[3] = data[i * 4 + j + 3]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_1x4(Matrix<Element, 1, 4> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 2] = m.data[2]; data[i * 4 + j + 3] = m.data[3]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 1, 4> row(int i) const { return slice_1x4(i, 0); } CUTLASS_HOST_DEVICE Matrix &set_row(Matrix<Element, 1, 4> const &v, int i = 0) { return set_slice_1x4(v, i, 0); } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const { Matrix<Element, 2, 1> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 4]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 4] = m.data[1]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const { Matrix<Element, 2, 2> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 4]; m.data[3] = data[i * 4 + j + 5]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 4] = m.data[2]; data[i * 4 + j + 5] = m.data[3]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 3> slice_2x3(int i = 0, int j = 0) const { Matrix<Element, 2, 3> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 2]; m.data[3] = data[i * 4 + j + 4]; m.data[4] = data[i * 4 + j + 5]; m.data[5] = data[i * 4 + j + 6]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x3(Matrix<Element, 2, 3> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 2] = m.data[2]; data[i * 4 + j + 4] = m.data[3]; data[i * 4 + j + 5] = m.data[4]; data[i * 4 + j + 6] = m.data[5]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 2, 4> slice_2x4(int i = 0, int j = 0) const { Matrix<Element, 2, 4> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 2]; m.data[3] = data[i * 4 + j + 3]; m.data[4] = data[i * 4 + j + 4]; m.data[5] = data[i * 4 + j + 5]; m.data[6] = data[i * 4 + j + 6]; m.data[7] = data[i * 4 + j + 7]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_2x4(Matrix<Element, 2, 4> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 2] = m.data[2]; data[i * 4 + j + 3] = m.data[3]; data[i * 4 + j + 4] = m.data[4]; data[i * 4 + j + 5] = m.data[5]; data[i * 4 + j + 6] = m.data[6]; data[i * 4 + j + 7] = m.data[7]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const { Matrix<Element, 3, 1> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] 
= data[i * 4 + j + 4]; m.data[2] = data[i * 4 + j + 8]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 4] = m.data[1]; data[i * 4 + j + 8] = m.data[2]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 3, 2> slice_3x2(int i = 0, int j = 0) const { Matrix<Element, 3, 2> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 4]; m.data[3] = data[i * 4 + j + 5]; m.data[4] = data[i * 4 + j + 8]; m.data[5] = data[i * 4 + j + 9]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_3x2(Matrix<Element, 3, 2> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 4] = m.data[2]; data[i * 4 + j + 5] = m.data[3]; data[i * 4 + j + 8] = m.data[4]; data[i * 4 + j + 9] = m.data[5]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 3, 3> slice_3x3(int i = 0, int j = 0) const { Matrix<Element, 3, 3> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 2]; m.data[3] = data[i * 4 + j + 4]; m.data[4] = data[i * 4 + j + 5]; m.data[5] = data[i * 4 + j + 6]; m.data[6] = data[i * 4 + j + 8]; m.data[7] = data[i * 4 + j + 9]; m.data[8] = data[i * 4 + j + 10]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_3x3(Matrix<Element, 3, 3> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 2] = m.data[2]; data[i * 4 + j + 4] = m.data[3]; data[i * 4 + j + 5] = m.data[4]; data[i * 4 + j + 6] = m.data[5]; data[i * 4 + j + 8] = m.data[6]; data[i * 4 + j + 9] = m.data[7]; data[i * 4 + j + 10] = m.data[8]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 3, 4> slice_3x4(int i = 0, int j = 0) const { Matrix<Element, 3, 4> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 2]; m.data[3] = data[i * 4 + j + 3]; m.data[4] = data[i * 4 + j + 4]; m.data[5] = data[i * 4 + j + 5]; m.data[6] = data[i * 4 + j + 6]; m.data[7] = data[i * 4 + j + 7]; m.data[8] = data[i * 4 + j + 8]; m.data[9] = data[i * 4 + j + 9]; m.data[10] = data[i * 4 + j + 10]; m.data[11] = data[i * 4 + j + 11]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_3x4(Matrix<Element, 3, 4> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 2] = m.data[2]; data[i * 4 + j + 3] = m.data[3]; data[i * 4 + j + 4] = m.data[4]; data[i * 4 + j + 5] = m.data[5]; data[i * 4 + j + 6] = m.data[6]; data[i * 4 + j + 7] = m.data[7]; data[i * 4 + j + 8] = m.data[8]; data[i * 4 + j + 9] = m.data[9]; data[i * 4 + j + 10] = m.data[10]; data[i * 4 + j + 11] = m.data[11]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 4, 1> slice_4x1(int i = 0, int j = 0) const { Matrix<Element, 4, 1> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 4]; m.data[2] = data[i * 4 + j + 8]; m.data[3] = data[i * 4 + j + 12]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_4x1(Matrix<Element, 4, 1> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] 
= m.data[0]; data[i * 4 + j + 4] = m.data[1]; data[i * 4 + j + 8] = m.data[2]; data[i * 4 + j + 12] = m.data[3]; return *this; } CUTLASS_HOST_DEVICE Matrix<Element, 4, 1> column(int j) const { return slice_4x1(0, j); } CUTLASS_HOST_DEVICE Matrix &set_column(Matrix<Element, 4, 1> const &v, int j =0) { return set_slice_4x1(v, 0, j); } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 4, 2> slice_4x2(int i = 0, int j = 0) const { Matrix<Element, 4, 2> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 4]; m.data[3] = data[i * 4 + j + 5]; m.data[4] = data[i * 4 + j + 8]; m.data[5] = data[i * 4 + j + 9]; m.data[6] = data[i * 4 + j + 12]; m.data[7] = data[i * 4 + j + 13]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_4x2(Matrix<Element, 4, 2> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 4] = m.data[2]; data[i * 4 + j + 5] = m.data[3]; data[i * 4 + j + 8] = m.data[4]; data[i * 4 + j + 9] = m.data[5]; data[i * 4 + j + 12] = m.data[6]; data[i * 4 + j + 13] = m.data[7]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 4, 3> slice_4x3(int i = 0, int j = 0) const { Matrix<Element, 4, 3> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 2]; m.data[3] = data[i * 4 + j + 4]; m.data[4] = data[i * 4 + j + 5]; m.data[5] = data[i * 4 + j + 6]; m.data[6] = data[i * 4 + j + 8]; m.data[7] = data[i * 4 + j + 9]; m.data[8] = data[i * 4 + j + 10]; m.data[9] = data[i * 4 + j + 12]; m.data[10] = data[i * 4 + j + 13]; m.data[11] = data[i * 4 + j + 14]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_4x3(Matrix<Element, 4, 3> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 2] = m.data[2]; data[i * 4 + j + 4] = m.data[3]; data[i * 4 + j + 5] = m.data[4]; data[i * 4 + j + 6] = m.data[5]; data[i * 4 + j + 8] = m.data[6]; data[i * 4 + j + 9] = m.data[7]; data[i * 4 + j + 10] = m.data[8]; data[i * 4 + j + 12] = m.data[9]; data[i * 4 + j + 13] = m.data[10]; data[i * 4 + j + 14] = m.data[11]; return *this; } /// Gets a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix<Element, 4, 4> slice_4x4(int i = 0, int j = 0) const { Matrix<Element, 4, 4> m; m.data[0] = data[i * 4 + j + 0]; m.data[1] = data[i * 4 + j + 1]; m.data[2] = data[i * 4 + j + 2]; m.data[3] = data[i * 4 + j + 3]; m.data[4] = data[i * 4 + j + 4]; m.data[5] = data[i * 4 + j + 5]; m.data[6] = data[i * 4 + j + 6]; m.data[7] = data[i * 4 + j + 7]; m.data[8] = data[i * 4 + j + 8]; m.data[9] = data[i * 4 + j + 9]; m.data[10] = data[i * 4 + j + 10]; m.data[11] = data[i * 4 + j + 11]; m.data[12] = data[i * 4 + j + 12]; m.data[13] = data[i * 4 + j + 13]; m.data[14] = data[i * 4 + j + 14]; m.data[15] = data[i * 4 + j + 15]; return m; } /// Overwrites a submatrix with optional offset CUTLASS_HOST_DEVICE Matrix & set_slice_4x4(Matrix<Element, 4, 4> const &m, int i = 0, int j = 0) { data[i * 4 + j + 0] = m.data[0]; data[i * 4 + j + 1] = m.data[1]; data[i * 4 + j + 2] = m.data[2]; data[i * 4 + j + 3] = m.data[3]; data[i * 4 + j + 4] = m.data[4]; data[i * 4 + j + 5] = m.data[5]; data[i * 4 + j + 6] = m.data[6]; data[i * 4 + j + 7] = m.data[7]; data[i * 4 + j + 8] = m.data[8]; data[i * 4 + j + 9] = m.data[9]; data[i * 4 + j + 10] = m.data[10]; data[i * 4 + j + 11] = 
m.data[11]; data[i * 4 + j + 12] = m.data[12]; data[i * 4 + j + 13] = m.data[13]; data[i * 4 + j + 14] = m.data[14]; data[i * 4 + j + 15] = m.data[15]; return *this; } /// Forms a 4-by-4 matrix by horizontally concatenating a 4-by-1 matrix with a 4-by-3 matrix CUTLASS_HOST_DEVICE static Matrix hcat(Matrix<Element, 4, 1> const & lhs, Matrix<Element, 4, 3> const & rhs) { return Matrix( lhs.at(0, 0), rhs.at(0, 0), rhs.at(0, 1), rhs.at(0, 2) , lhs.at(1, 0), rhs.at(1, 0), rhs.at(1, 1), rhs.at(1, 2) , lhs.at(2, 0), rhs.at(2, 0), rhs.at(2, 1), rhs.at(2, 2) , lhs.at(3, 0), rhs.at(3, 0), rhs.at(3, 1), rhs.at(3, 2)); } /// Forms a 4-by-4 matrix by horizontally concatenating a 4-by-2 matrix with a 4-by-2 matrix CUTLASS_HOST_DEVICE static Matrix hcat(Matrix<Element, 4, 2> const & lhs, Matrix<Element, 4, 2> const & rhs) { return Matrix( lhs.at(0, 0), lhs.at(0, 1), rhs.at(0, 0), rhs.at(0, 1) , lhs.at(1, 0), lhs.at(1, 1), rhs.at(1, 0), rhs.at(1, 1) , lhs.at(2, 0), lhs.at(2, 1), rhs.at(2, 0), rhs.at(2, 1) , lhs.at(3, 0), lhs.at(3, 1), rhs.at(3, 0), rhs.at(3, 1)); } /// Forms a 4-by-4 matrix by horizontally concatenating a 4-by-3 matrix with a 4-by-1 matrix CUTLASS_HOST_DEVICE static Matrix hcat(Matrix<Element, 4, 3> const & lhs, Matrix<Element, 4, 1> const & rhs) { return Matrix( lhs.at(0, 0), lhs.at(0, 1), lhs.at(0, 2), rhs.at(0, 0) , lhs.at(1, 0), lhs.at(1, 1), lhs.at(1, 2), rhs.at(1, 0) , lhs.at(2, 0), lhs.at(2, 1), lhs.at(2, 2), rhs.at(2, 0) , lhs.at(3, 0), lhs.at(3, 1), lhs.at(3, 2), rhs.at(3, 0)); } /// Forms a 4-by-4 matrix by vertically concatenating a 1-by-4 matrix with a 3-by-4 matrix CUTLASS_HOST_DEVICE static Matrix vcat(Matrix<Element, 1, 4> const & upper, Matrix<Element, 3, 4> const & lower) { return Matrix( upper.at(0, 0), upper.at(0, 1), upper.at(0, 2), upper.at(0, 3) , lower.at(0, 0), lower.at(0, 1), lower.at(0, 2), lower.at(0, 3) , lower.at(1, 0), lower.at(1, 1), lower.at(1, 2), lower.at(1, 3) , lower.at(2, 0), lower.at(2, 1), lower.at(2, 2), lower.at(2, 3)); } /// Forms a 4-by-4 matrix by vertically concatenating a 2-by-4 matrix with a 2-by-4 matrix CUTLASS_HOST_DEVICE static Matrix vcat(Matrix<Element, 2, 4> const & upper, Matrix<Element, 2, 4> const & lower) { return Matrix( upper.at(0, 0), upper.at(0, 1), upper.at(0, 2), upper.at(0, 3) , upper.at(1, 0), upper.at(1, 1), upper.at(1, 2), upper.at(1, 3) , lower.at(0, 0), lower.at(0, 1), lower.at(0, 2), lower.at(0, 3) , lower.at(1, 0), lower.at(1, 1), lower.at(1, 2), lower.at(1, 3)); } /// Forms a 4-by-4 matrix by vertically concatenating a 3-by-4 matrix with a 1-by-4 matrix CUTLASS_HOST_DEVICE static Matrix vcat(Matrix<Element, 3, 4> const & upper, Matrix<Element, 1, 4> const & lower) { return Matrix( upper.at(0, 0), upper.at(0, 1), upper.at(0, 2), upper.at(0, 3) , upper.at(1, 0), upper.at(1, 1), upper.at(1, 2), upper.at(1, 3) , upper.at(2, 0), upper.at(2, 1), upper.at(2, 2), upper.at(2, 3) , lower.at(0, 0), lower.at(0, 1), lower.at(0, 2), lower.at(0, 3)); } /// Forms a 4-by-4 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Element A, Matrix<Element, 1, 3> const & B, Matrix<Element, 3, 1> const & C, Matrix<Element, 3, 3> const & D) { return Matrix( A, B.at(0, 0), B.at(0, 1), B.at(0, 2) , C.at(0, 0), D.at(0, 0), D.at(0, 1), D.at(0, 2) , C.at(1, 0), D.at(1, 0), D.at(1, 1), D.at(1, 2) , C.at(2, 0), D.at(2, 0), D.at(2, 1), D.at(2, 2) ); } /// Forms a 4-by-4 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 1, 2> const & A, Matrix<Element, 1, 2> const & B, 
Matrix<Element, 3, 2> const & C, Matrix<Element, 3, 2> const & D) { return Matrix( A.at(0, 0), A.at(0, 1), B.at(0, 0), B.at(0, 1) , C.at(0, 0), C.at(0, 1), D.at(0, 0), D.at(0, 1) , C.at(1, 0), C.at(1, 1), D.at(1, 0), D.at(1, 1) , C.at(2, 0), C.at(2, 1), D.at(2, 0), D.at(2, 1) ); } /// Forms a 4-by-4 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 1, 3> const & A, Element B, Matrix<Element, 3, 3> const & C, Matrix<Element, 3, 1> const & D) { return Matrix( A.at(0, 0), A.at(0, 1), A.at(0, 2), B , C.at(0, 0), C.at(0, 1), C.at(0, 2), D.at(0, 0) , C.at(1, 0), C.at(1, 1), C.at(1, 2), D.at(1, 0) , C.at(2, 0), C.at(2, 1), C.at(2, 2), D.at(2, 0) ); } /// Forms a 4-by-4 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 2, 1> const & A, Matrix<Element, 2, 3> const & B, Matrix<Element, 2, 1> const & C, Matrix<Element, 2, 3> const & D) { return Matrix( A.at(0, 0), B.at(0, 0), B.at(0, 1), B.at(0, 2) , A.at(1, 0), B.at(1, 0), B.at(1, 1), B.at(1, 2) , C.at(0, 0), D.at(0, 0), D.at(0, 1), D.at(0, 2) , C.at(1, 0), D.at(1, 0), D.at(1, 1), D.at(1, 2) ); } /// Forms a 4-by-4 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 2, 2> const & A, Matrix<Element, 2, 2> const & B, Matrix<Element, 2, 2> const & C, Matrix<Element, 2, 2> const & D) { return Matrix( A.at(0, 0), A.at(0, 1), B.at(0, 0), B.at(0, 1) , A.at(1, 0), A.at(1, 1), B.at(1, 0), B.at(1, 1) , C.at(0, 0), C.at(0, 1), D.at(0, 0), D.at(0, 1) , C.at(1, 0), C.at(1, 1), D.at(1, 0), D.at(1, 1) ); } /// Forms a 4-by-4 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 2, 3> const & A, Matrix<Element, 2, 1> const & B, Matrix<Element, 2, 3> const & C, Matrix<Element, 2, 1> const & D) { return Matrix( A.at(0, 0), A.at(0, 1), A.at(0, 2), B.at(0, 0) , A.at(1, 0), A.at(1, 1), A.at(1, 2), B.at(1, 0) , C.at(0, 0), C.at(0, 1), C.at(0, 2), D.at(0, 0) , C.at(1, 0), C.at(1, 1), C.at(1, 2), D.at(1, 0) ); } /// Forms a 4-by-4 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 3, 1> const & A, Matrix<Element, 3, 3> const & B, Element C, Matrix<Element, 1, 3> const & D) { return Matrix( A.at(0, 0), B.at(0, 0), B.at(0, 1), B.at(0, 2) , A.at(1, 0), B.at(1, 0), B.at(1, 1), B.at(1, 2) , A.at(2, 0), B.at(2, 0), B.at(2, 1), B.at(2, 2) , C, D.at(0, 0), D.at(0, 1), D.at(0, 2) ); } /// Forms a 4-by-4 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 3, 2> const & A, Matrix<Element, 3, 2> const & B, Matrix<Element, 1, 2> const & C, Matrix<Element, 1, 2> const & D) { return Matrix( A.at(0, 0), A.at(0, 1), B.at(0, 0), B.at(0, 1) , A.at(1, 0), A.at(1, 1), B.at(1, 0), B.at(1, 1) , A.at(2, 0), A.at(2, 1), B.at(2, 0), B.at(2, 1) , C.at(0, 0), C.at(0, 1), D.at(0, 0), D.at(0, 1) ); } /// Forms a 4-by-4 matrix by concatenating four components CUTLASS_HOST_DEVICE static Matrix block( Matrix<Element, 3, 3> const & A, Matrix<Element, 3, 1> const & B, Matrix<Element, 1, 3> const & C, Element D) { return Matrix( A.at(0, 0), A.at(0, 1), A.at(0, 2), B.at(0, 0) , A.at(1, 0), A.at(1, 1), A.at(1, 2), B.at(1, 0) , A.at(2, 0), A.at(2, 1), A.at(2, 2), B.at(2, 0) , C.at(0, 0), C.at(0, 1), C.at(0, 2), D ); } /// Elementwise add operator (4-by-4) CUTLASS_HOST_DEVICE Matrix add(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] + rhs.data[0]; result.data[1] = data[1] + rhs.data[1]; result.data[2] = data[2] + 
rhs.data[2]; result.data[3] = data[3] + rhs.data[3]; result.data[4] = data[4] + rhs.data[4]; result.data[5] = data[5] + rhs.data[5]; result.data[6] = data[6] + rhs.data[6]; result.data[7] = data[7] + rhs.data[7]; result.data[8] = data[8] + rhs.data[8]; result.data[9] = data[9] + rhs.data[9]; result.data[10] = data[10] + rhs.data[10]; result.data[11] = data[11] + rhs.data[11]; result.data[12] = data[12] + rhs.data[12]; result.data[13] = data[13] + rhs.data[13]; result.data[14] = data[14] + rhs.data[14]; result.data[15] = data[15] + rhs.data[15]; return result; } /// Elementwise add operator (4-by-4) CUTLASS_HOST_DEVICE Matrix operator +(Matrix const &rhs) const { return add(rhs); } /// Elementwise add operator (4-by-4) CUTLASS_HOST_DEVICE Matrix & operator +=(Matrix const &rhs) { data[0] += rhs.data[0]; data[1] += rhs.data[1]; data[2] += rhs.data[2]; data[3] += rhs.data[3]; data[4] += rhs.data[4]; data[5] += rhs.data[5]; data[6] += rhs.data[6]; data[7] += rhs.data[7]; data[8] += rhs.data[8]; data[9] += rhs.data[9]; data[10] += rhs.data[10]; data[11] += rhs.data[11]; data[12] += rhs.data[12]; data[13] += rhs.data[13]; data[14] += rhs.data[14]; data[15] += rhs.data[15]; return *this; } /// Elementwise subtract operator (4-by-4) CUTLASS_HOST_DEVICE Matrix subtract(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] - rhs.data[0]; result.data[1] = data[1] - rhs.data[1]; result.data[2] = data[2] - rhs.data[2]; result.data[3] = data[3] - rhs.data[3]; result.data[4] = data[4] - rhs.data[4]; result.data[5] = data[5] - rhs.data[5]; result.data[6] = data[6] - rhs.data[6]; result.data[7] = data[7] - rhs.data[7]; result.data[8] = data[8] - rhs.data[8]; result.data[9] = data[9] - rhs.data[9]; result.data[10] = data[10] - rhs.data[10]; result.data[11] = data[11] - rhs.data[11]; result.data[12] = data[12] - rhs.data[12]; result.data[13] = data[13] - rhs.data[13]; result.data[14] = data[14] - rhs.data[14]; result.data[15] = data[15] - rhs.data[15]; return result; } /// Elementwise subtract operator (4-by-4) CUTLASS_HOST_DEVICE Matrix operator -(Matrix const &rhs) const { return subtract(rhs); } /// Elementwise subtract operator (4-by-4) CUTLASS_HOST_DEVICE Matrix & operator -=(Matrix const &rhs) { data[0] -= rhs.data[0]; data[1] -= rhs.data[1]; data[2] -= rhs.data[2]; data[3] -= rhs.data[3]; data[4] -= rhs.data[4]; data[5] -= rhs.data[5]; data[6] -= rhs.data[6]; data[7] -= rhs.data[7]; data[8] -= rhs.data[8]; data[9] -= rhs.data[9]; data[10] -= rhs.data[10]; data[11] -= rhs.data[11]; data[12] -= rhs.data[12]; data[13] -= rhs.data[13]; data[14] -= rhs.data[14]; data[15] -= rhs.data[15]; return *this; } /// Elementwise multiply operator (4-by-4) CUTLASS_HOST_DEVICE Matrix multiply(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] * rhs.data[0]; result.data[1] = data[1] * rhs.data[1]; result.data[2] = data[2] * rhs.data[2]; result.data[3] = data[3] * rhs.data[3]; result.data[4] = data[4] * rhs.data[4]; result.data[5] = data[5] * rhs.data[5]; result.data[6] = data[6] * rhs.data[6]; result.data[7] = data[7] * rhs.data[7]; result.data[8] = data[8] * rhs.data[8]; result.data[9] = data[9] * rhs.data[9]; result.data[10] = data[10] * rhs.data[10]; result.data[11] = data[11] * rhs.data[11]; result.data[12] = data[12] * rhs.data[12]; result.data[13] = data[13] * rhs.data[13]; result.data[14] = data[14] * rhs.data[14]; result.data[15] = data[15] * rhs.data[15]; return result; } /// Scalar multiply operator (4-by-4) CUTLASS_HOST_DEVICE Matrix multiply(Element const &s) const { Matrix 
result; result.data[0] = data[0] * s; result.data[1] = data[1] * s; result.data[2] = data[2] * s; result.data[3] = data[3] * s; result.data[4] = data[4] * s; result.data[5] = data[5] * s; result.data[6] = data[6] * s; result.data[7] = data[7] * s; result.data[8] = data[8] * s; result.data[9] = data[9] * s; result.data[10] = data[10] * s; result.data[11] = data[11] * s; result.data[12] = data[12] * s; result.data[13] = data[13] * s; result.data[14] = data[14] * s; result.data[15] = data[15] * s; return result; } /// Scalar multiply operator (4-by-4) CUTLASS_HOST_DEVICE Matrix operator *(Element const &s) const { return multiply(s); } /// Scalar multiply operator (4-by-4) CUTLASS_HOST_DEVICE Matrix & operator *=(Element const &s) { data[0] *= s; data[1] *= s; data[2] *= s; data[3] *= s; data[4] *= s; data[5] *= s; data[6] *= s; data[7] *= s; data[8] *= s; data[9] *= s; data[10] *= s; data[11] *= s; data[12] *= s; data[13] *= s; data[14] *= s; data[15] *= s; return *this; } /// Elementwise divide operator (4-by-4) CUTLASS_HOST_DEVICE Matrix divide(Matrix const &rhs) const { Matrix result; result.data[0] = data[0] / rhs.data[0]; result.data[1] = data[1] / rhs.data[1]; result.data[2] = data[2] / rhs.data[2]; result.data[3] = data[3] / rhs.data[3]; result.data[4] = data[4] / rhs.data[4]; result.data[5] = data[5] / rhs.data[5]; result.data[6] = data[6] / rhs.data[6]; result.data[7] = data[7] / rhs.data[7]; result.data[8] = data[8] / rhs.data[8]; result.data[9] = data[9] / rhs.data[9]; result.data[10] = data[10] / rhs.data[10]; result.data[11] = data[11] / rhs.data[11]; result.data[12] = data[12] / rhs.data[12]; result.data[13] = data[13] / rhs.data[13]; result.data[14] = data[14] / rhs.data[14]; result.data[15] = data[15] / rhs.data[15]; return result; } /// Scalar divide operator (4-by-4) CUTLASS_HOST_DEVICE Matrix divide(Element const &s) const { Matrix result; result.data[0] = data[0] / s; result.data[1] = data[1] / s; result.data[2] = data[2] / s; result.data[3] = data[3] / s; result.data[4] = data[4] / s; result.data[5] = data[5] / s; result.data[6] = data[6] / s; result.data[7] = data[7] / s; result.data[8] = data[8] / s; result.data[9] = data[9] / s; result.data[10] = data[10] / s; result.data[11] = data[11] / s; result.data[12] = data[12] / s; result.data[13] = data[13] / s; result.data[14] = data[14] / s; result.data[15] = data[15] / s; return result; } /// Scalar divide operator (4-by-4) CUTLASS_HOST_DEVICE Matrix operator /(Element const &s) const { return divide(s); } /// Scalar divide operator (4-by-4) CUTLASS_HOST_DEVICE Matrix & operator /=(Element const &s) { data[0] /= s; data[1] /= s; data[2] /= s; data[3] /= s; data[4] /= s; data[5] /= s; data[6] /= s; data[7] /= s; data[8] /= s; data[9] /= s; data[10] /= s; data[11] /= s; data[12] /= s; data[13] /= s; data[14] /= s; data[15] /= s; return *this; } /// Elementwise divide operator (4-by-4) CUTLASS_HOST_DEVICE Matrix operator /(Matrix const &rhs) const { return divide(rhs); } /// Elementwise divide operator (4-by-4) CUTLASS_HOST_DEVICE Matrix & operator /=(Matrix const &rhs) { data[0] /= rhs.data[0]; data[1] /= rhs.data[1]; data[2] /= rhs.data[2]; data[3] /= rhs.data[3]; data[4] /= rhs.data[4]; data[5] /= rhs.data[5]; data[6] /= rhs.data[6]; data[7] /= rhs.data[7]; data[8] /= rhs.data[8]; data[9] /= rhs.data[9]; data[10] /= rhs.data[10]; data[11] /= rhs.data[11]; data[12] /= rhs.data[12]; data[13] /= rhs.data[13]; data[14] /= rhs.data[14]; data[15] /= rhs.data[15]; return *this; } /// Negates each element of the matrix 
CUTLASS_HOST_DEVICE Matrix operator-() const { Matrix m; m.data[0] = -data[0]; m.data[1] = -data[1]; m.data[2] = -data[2]; m.data[3] = -data[3]; m.data[4] = -data[4]; m.data[5] = -data[5]; m.data[6] = -data[6]; m.data[7] = -data[7]; m.data[8] = -data[8]; m.data[9] = -data[9]; m.data[10] = -data[10]; m.data[11] = -data[11]; m.data[12] = -data[12]; m.data[13] = -data[13]; m.data[14] = -data[14]; m.data[15] = -data[15]; return m; } /// Matrix product of size 4-by-1-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 4, 1> product( Matrix<Element, 4, 1> const &rhs, Matrix<Element, 4, 1> accum = Matrix<Element, 4, 1>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[4] * rhs.data[0]; accum.data[2] += data[8] * rhs.data[0]; accum.data[3] += data[12] * rhs.data[0]; // k=1 accum.data[0] += data[1] * rhs.data[1]; accum.data[1] += data[5] * rhs.data[1]; accum.data[2] += data[9] * rhs.data[1]; accum.data[3] += data[13] * rhs.data[1]; // k=2 accum.data[0] += data[2] * rhs.data[2]; accum.data[1] += data[6] * rhs.data[2]; accum.data[2] += data[10] * rhs.data[2]; accum.data[3] += data[14] * rhs.data[2]; // k=3 accum.data[0] += data[3] * rhs.data[3]; accum.data[1] += data[7] * rhs.data[3]; accum.data[2] += data[11] * rhs.data[3]; accum.data[3] += data[15] * rhs.data[3]; return accum; } /// Matrix product of size 4-by-1-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 4, 1> operator*(Matrix<Element, 4, 1> const &rhs) const { return product(rhs); } /// Matrix product of size 4-by-2-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 4, 2> product( Matrix<Element, 4, 2> const &rhs, Matrix<Element, 4, 2> accum = Matrix<Element, 4, 2>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[4] * rhs.data[0]; accum.data[3] += data[4] * rhs.data[1]; accum.data[4] += data[8] * rhs.data[0]; accum.data[5] += data[8] * rhs.data[1]; accum.data[6] += data[12] * rhs.data[0]; accum.data[7] += data[12] * rhs.data[1]; // k=1 accum.data[0] += data[1] * rhs.data[2]; accum.data[1] += data[1] * rhs.data[3]; accum.data[2] += data[5] * rhs.data[2]; accum.data[3] += data[5] * rhs.data[3]; accum.data[4] += data[9] * rhs.data[2]; accum.data[5] += data[9] * rhs.data[3]; accum.data[6] += data[13] * rhs.data[2]; accum.data[7] += data[13] * rhs.data[3]; // k=2 accum.data[0] += data[2] * rhs.data[4]; accum.data[1] += data[2] * rhs.data[5]; accum.data[2] += data[6] * rhs.data[4]; accum.data[3] += data[6] * rhs.data[5]; accum.data[4] += data[10] * rhs.data[4]; accum.data[5] += data[10] * rhs.data[5]; accum.data[6] += data[14] * rhs.data[4]; accum.data[7] += data[14] * rhs.data[5]; // k=3 accum.data[0] += data[3] * rhs.data[6]; accum.data[1] += data[3] * rhs.data[7]; accum.data[2] += data[7] * rhs.data[6]; accum.data[3] += data[7] * rhs.data[7]; accum.data[4] += data[11] * rhs.data[6]; accum.data[5] += data[11] * rhs.data[7]; accum.data[6] += data[15] * rhs.data[6]; accum.data[7] += data[15] * rhs.data[7]; return accum; } /// Matrix product of size 4-by-2-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 4, 2> operator*(Matrix<Element, 4, 2> const &rhs) const { return product(rhs); } /// Matrix product of size 4-by-3-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 4, 3> product( Matrix<Element, 4, 3> const &rhs, Matrix<Element, 4, 3> accum = Matrix<Element, 4, 3>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[4] * rhs.data[0]; accum.data[4] +=
data[4] * rhs.data[1]; accum.data[5] += data[4] * rhs.data[2]; accum.data[6] += data[8] * rhs.data[0]; accum.data[7] += data[8] * rhs.data[1]; accum.data[8] += data[8] * rhs.data[2]; accum.data[9] += data[12] * rhs.data[0]; accum.data[10] += data[12] * rhs.data[1]; accum.data[11] += data[12] * rhs.data[2]; // k=1 accum.data[0] += data[1] * rhs.data[3]; accum.data[1] += data[1] * rhs.data[4]; accum.data[2] += data[1] * rhs.data[5]; accum.data[3] += data[5] * rhs.data[3]; accum.data[4] += data[5] * rhs.data[4]; accum.data[5] += data[5] * rhs.data[5]; accum.data[6] += data[9] * rhs.data[3]; accum.data[7] += data[9] * rhs.data[4]; accum.data[8] += data[9] * rhs.data[5]; accum.data[9] += data[13] * rhs.data[3]; accum.data[10] += data[13] * rhs.data[4]; accum.data[11] += data[13] * rhs.data[5]; // k=2 accum.data[0] += data[2] * rhs.data[6]; accum.data[1] += data[2] * rhs.data[7]; accum.data[2] += data[2] * rhs.data[8]; accum.data[3] += data[6] * rhs.data[6]; accum.data[4] += data[6] * rhs.data[7]; accum.data[5] += data[6] * rhs.data[8]; accum.data[6] += data[10] * rhs.data[6]; accum.data[7] += data[10] * rhs.data[7]; accum.data[8] += data[10] * rhs.data[8]; accum.data[9] += data[14] * rhs.data[6]; accum.data[10] += data[14] * rhs.data[7]; accum.data[11] += data[14] * rhs.data[8]; // k=3 accum.data[0] += data[3] * rhs.data[9]; accum.data[1] += data[3] * rhs.data[10]; accum.data[2] += data[3] * rhs.data[11]; accum.data[3] += data[7] * rhs.data[9]; accum.data[4] += data[7] * rhs.data[10]; accum.data[5] += data[7] * rhs.data[11]; accum.data[6] += data[11] * rhs.data[9]; accum.data[7] += data[11] * rhs.data[10]; accum.data[8] += data[11] * rhs.data[11]; accum.data[9] += data[15] * rhs.data[9]; accum.data[10] += data[15] * rhs.data[10]; accum.data[11] += data[15] * rhs.data[11]; return accum; } /// Matrix product of size 4-by-3-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 4, 3> operator*(Matrix<Element, 4, 3> const &rhs) const { return product(rhs); } /// Matrix product of size 4-by-4-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 4, 4> product( Matrix<Element, 4, 4> const &rhs, Matrix<Element, 4, 4> accum = Matrix<Element, 4, 4>() ) const { // k=0 accum.data[0] += data[0] * rhs.data[0]; accum.data[1] += data[0] * rhs.data[1]; accum.data[2] += data[0] * rhs.data[2]; accum.data[3] += data[0] * rhs.data[3]; accum.data[4] += data[4] * rhs.data[0]; accum.data[5] += data[4] * rhs.data[1]; accum.data[6] += data[4] * rhs.data[2]; accum.data[7] += data[4] * rhs.data[3]; accum.data[8] += data[8] * rhs.data[0]; accum.data[9] += data[8] * rhs.data[1]; accum.data[10] += data[8] * rhs.data[2]; accum.data[11] += data[8] * rhs.data[3]; accum.data[12] += data[12] * rhs.data[0]; accum.data[13] += data[12] * rhs.data[1]; accum.data[14] += data[12] * rhs.data[2]; accum.data[15] += data[12] * rhs.data[3]; // k=1 accum.data[0] += data[1] * rhs.data[4]; accum.data[1] += data[1] * rhs.data[5]; accum.data[2] += data[1] * rhs.data[6]; accum.data[3] += data[1] * rhs.data[7]; accum.data[4] += data[5] * rhs.data[4]; accum.data[5] += data[5] * rhs.data[5]; accum.data[6] += data[5] * rhs.data[6]; accum.data[7] += data[5] * rhs.data[7]; accum.data[8] += data[9] * rhs.data[4]; accum.data[9] += data[9] * rhs.data[5]; accum.data[10] += data[9] * rhs.data[6]; accum.data[11] += data[9] * rhs.data[7]; accum.data[12] += data[13] * rhs.data[4]; accum.data[13] += data[13] * rhs.data[5]; accum.data[14] += data[13] * rhs.data[6]; accum.data[15] += data[13] * rhs.data[7]; // k=2 accum.data[0] += data[2] * rhs.data[8]; accum.data[1] += data[2] * 
rhs.data[9]; accum.data[2] += data[2] * rhs.data[10]; accum.data[3] += data[2] * rhs.data[11]; accum.data[4] += data[6] * rhs.data[8]; accum.data[5] += data[6] * rhs.data[9]; accum.data[6] += data[6] * rhs.data[10]; accum.data[7] += data[6] * rhs.data[11]; accum.data[8] += data[10] * rhs.data[8]; accum.data[9] += data[10] * rhs.data[9]; accum.data[10] += data[10] * rhs.data[10]; accum.data[11] += data[10] * rhs.data[11]; accum.data[12] += data[14] * rhs.data[8]; accum.data[13] += data[14] * rhs.data[9]; accum.data[14] += data[14] * rhs.data[10]; accum.data[15] += data[14] * rhs.data[11]; // k=3 accum.data[0] += data[3] * rhs.data[12]; accum.data[1] += data[3] * rhs.data[13]; accum.data[2] += data[3] * rhs.data[14]; accum.data[3] += data[3] * rhs.data[15]; accum.data[4] += data[7] * rhs.data[12]; accum.data[5] += data[7] * rhs.data[13]; accum.data[6] += data[7] * rhs.data[14]; accum.data[7] += data[7] * rhs.data[15]; accum.data[8] += data[11] * rhs.data[12]; accum.data[9] += data[11] * rhs.data[13]; accum.data[10] += data[11] * rhs.data[14]; accum.data[11] += data[11] * rhs.data[15]; accum.data[12] += data[15] * rhs.data[12]; accum.data[13] += data[15] * rhs.data[13]; accum.data[14] += data[15] * rhs.data[14]; accum.data[15] += data[15] * rhs.data[15]; return accum; } /// Matrix product of size 4-by-4-by-4 CUTLASS_HOST_DEVICE Matrix<Element, 4, 4> operator*(Matrix<Element, 4, 4> const &rhs) const { return product(rhs); } /// Matrix product of size 4-by-4-by-4 CUTLASS_HOST_DEVICE Matrix & operator*=(Matrix<Element, 4, 4> const &rhs) { *this = product(rhs); return *this; } /// Returns the sum of elements CUTLASS_HOST_DEVICE Element sum(Element accum = Element()) const { accum += data[0]; accum += data[1]; accum += data[2]; accum += data[3]; accum += data[4]; accum += data[5]; accum += data[6]; accum += data[7]; accum += data[8]; accum += data[9]; accum += data[10]; accum += data[11]; accum += data[12]; accum += data[13]; accum += data[14]; accum += data[15]; return accum; } /// Returns the sum of squared elements CUTLASS_HOST_DEVICE Element norm(Element accum = Element()) const { accum += data[0] * data[0]; accum += data[1] * data[1]; accum += data[2] * data[2]; accum += data[3] * data[3]; accum += data[4] * data[4]; accum += data[5] * data[5]; accum += data[6] * data[6]; accum += data[7] * data[7]; accum += data[8] * data[8]; accum += data[9] * data[9]; accum += data[10] * data[10]; accum += data[11] * data[11]; accum += data[12] * data[12]; accum += data[13] * data[13]; accum += data[14] * data[14]; accum += data[15] * data[15]; return accum; } /// Returns square root of the norm CUTLASS_HOST_DEVICE Element magnitude() const { return fast_sqrt(norm()); } /// Returns the sum of diagonal elements CUTLASS_HOST_DEVICE Element trace(Element accum = Element()) const { accum += data[0]; accum += data[5]; accum += data[10]; accum += data[15]; return accum; } /// Returns 4-by-4 rotation matrix around the X axis CUTLASS_HOST_DEVICE static Matrix rotation_X(Element theta) { Matrix m = identity(); Element c = fast_cos(theta); Element s = fast_sin(theta); m.at(1, 1) = c; m.at(1, 2) = -s; m.at(2, 1) = s; m.at(2, 2) = c; return m; } /// Returns 4-by-4 rotation matrix around the Y axis CUTLASS_HOST_DEVICE static Matrix rotation_Y(Element theta) { Matrix m = identity(); Element c = fast_cos(theta); Element s = fast_sin(theta); m.at(0, 0) = c; m.at(2, 0) = -s; m.at(0, 2) = s; m.at(2, 2) = c; return m; } /// Returns 4-by-4 rotation matrix around the Z axis CUTLASS_HOST_DEVICE static Matrix rotation_Z(Element 
theta) { Matrix m = Matrix::identity(); Element c = fast_cos(theta); Element s = fast_sin(theta); m.at(0, 0) = c; m.at(0, 1) = -s; m.at(1, 0) = s; m.at(1, 1) = c; return m; } /// Returns a 4-by-4 rotation matrix around a unit-length axis CUTLASS_HOST_DEVICE static Matrix rotation(Element theta, Matrix<Element, 3, 1> const &u) { Element x = u.data[0]; Element y = u.data[1]; Element z = u.data[2]; Element c = fast_cos(theta); Element s = fast_sin(theta); Element one_minus_cos = Element(1) - fast_cos(theta); Matrix m; m.set_slice3x3({ c + x * x * one_minus_cos, x * y * one_minus_cos - z * s, x * z * one_minus_cos + y * s, y * x * one_minus_cos + z * s, c + y * y * one_minus_cos, y * z * one_minus_cos - x * s, z * x * one_minus_cos - y * s, z * y * one_minus_cos + x * s, c + z * z * one_minus_cos }); return m; } /// Returns a 4-by-4 reflection about the plane specified by the /// unit-length normal vector n_unit CUTLASS_HOST_DEVICE static Matrix reflection(Matrix<Element, 3, 1> const &n_unit) { Element a = n_unit.data[0]; Element b = n_unit.data[1]; Element c = n_unit.data[2]; Matrix m = Matrix::identity(); m.set_slice3x3({ Element(1) - Element(2) * a * a, Element(-2) * a * b, Element(-2) * a * c, Element(-2) * a * b, Element(1) - Element(2) * b * b, Element(-2) * b * c, Element(-2) * a * c, Element(-2) * b * c, Element(1) - Element(2) * c * c }); return m; } /// Returns a perspective projection matrix typical of OpenGL applications CUTLASS_HOST_DEVICE static Matrix perspective(Element near_plane, Element far_plane, Element fovH, Element fovV) { Element aspect = fovH / fovV; Element f = Element(cos(fovV)) / Element(fovH); Element Q = near_plane - far_plane; return Matrix( f / aspect, 0, 0, 0, 0, f, 0, 0, 0, 0, (near_plane + far_plane) / Q, Element(2) * far_plane * near_plane / Q, 0, 0, -1, 0 ); } CUTLASS_HOST_DEVICE static Matrix translation(Matrix<Element, 3, 1> const &v) { return Matrix( 1, 0, 0, v.data[0], 0, 1, 0, v.data[1], 0, 0, 1, v.data[2], 0, 0, 0, 1 ); } /// Computes the determinant of a 4-by-4 matrix CUTLASS_HOST_DEVICE Element determinant(Element accum = Element()) const { accum += at(0, 0) * Matrix<Element, 3, 3>({ at(1, 1), at(1, 2), at(1, 3), at(2, 1), at(2, 2), at(2, 3), at(3, 1), at(3, 2), at(3, 3) }).determinant(); accum -= at(0, 1) * Matrix<Element, 3, 3>({ at(1, 0), at(1, 2), at(1, 3), at(2, 0), at(2, 2), at(2, 3), at(3, 0), at(3, 2), at(3, 3) }).determinant(); accum += at(0, 2) * Matrix<Element, 3, 3>({ at(1, 0), at(1, 1), at(1, 3), at(2, 0), at(2, 1), at(2, 3), at(3, 0), at(3, 1), at(3, 3) }).determinant(); accum -= at(0, 3) * Matrix<Element, 3, 3>({ at(1, 0), at(1, 1), at(1, 2), at(2, 0), at(2, 1), at(2, 2), at(3, 0), at(3, 1), at(3, 2) }).determinant(); return accum; } /// Computes the inverse of a 4-by-4 matrix (ignores the optional argument) CUTLASS_HOST_DEVICE Matrix inverse(Element ignore = 1) const { Matrix<Element, 2, 2> B = slice_2x2(0, 2); Matrix<Element, 2, 2> A = slice_2x2(0, 0); Matrix<Element, 2, 2> C = slice_2x2(2, 0); Matrix<Element, 2, 2> D = slice_2x2(2, 2); Matrix<Element, 2, 2> D_inv = D.inverse(); Matrix<Element, 2, 2> E = (A - B * D_inv * C).inverse(); return Matrix::block( E, -E * B * D_inv, -D_inv * C * E, D_inv + D_inv * C * E * B * D_inv ); } }; /// Template alias for 4-by-4 matrix template <typename Element> using Matrix4x4 = Matrix<Element, 4, 4>; /// Free function to infer element type from template arguments template <typename Element> CUTLASS_HOST_DEVICE Matrix4x4<Element> make_Matrix4x4( Element _0_0, Element _0_1, Element _0_2, Element
_0_3, Element _1_0, Element _1_1, Element _1_2, Element _1_3, Element _2_0, Element _2_1, Element _2_2, Element _2_3, Element _3_0, Element _3_1, Element _3_2, Element _3_3 ) { return Matrix4x4<Element>( _0_0, _0_1, _0_2, _0_3, _1_0, _1_1, _1_2, _1_3, _2_0, _2_1, _2_2, _2_3, _3_0, _3_1, _3_2, _3_3 ); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Elementwise scalar multiplication template <typename Element, int Rows, int Columns> CUTLASS_HOST_DEVICE Matrix<Element, Rows, Columns> operator*(Element s, Matrix<Element, Rows, Columns> const &rhs) { return rhs.multiply(s); } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/matrix.h/0
{ "file_path": "cutlass/include/cutlass/matrix.h", "repo_id": "cutlass", "token_count": 169298 }
35
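The 4-by-4 `Matrix` specialization above composes naturally with the smaller specializations defined earlier in the same header. The following is a minimal host-side sketch, not part of the header itself; it assumes the 3-by-1 and 4-by-1 specializations expose the same element-wise constructors and `at()` accessors as the 4-by-4 class shown here.

```cpp
#include <iostream>
#include "cutlass/matrix.h"

int main() {
  using Matrix4x4 = cutlass::Matrix4x4<float>;
  using Vector4 = cutlass::Matrix<float, 4, 1>;

  // Compose a rigid transform: rotate about Z, then translate.
  Matrix4x4 R = Matrix4x4::rotation_Z(0.5f);
  Matrix4x4 T = Matrix4x4::translation(cutlass::Matrix<float, 3, 1>(1.0f, 2.0f, 3.0f));
  Matrix4x4 M = T * R;   // 4-by-4-by-4 product defined above

  // Apply the transform to a homogeneous point
  // (element-wise 4-by-1 constructor assumed from the earlier specialization).
  Vector4 p(0.25f, -1.0f, 4.0f, 1.0f);
  Vector4 q = M * p;     // 4-by-1-by-4 product defined above

  std::cout << q.at(0, 0) << ", " << q.at(1, 0) << ", " << q.at(2, 0) << "\n";
  return 0;
}
```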
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Kernel performing a reduction over one or more ranks of an affine tensor */ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/fast_math.h" #include "cutlass/numeric_types.h" #include "cutlass/numeric_conversion.h" #include "cutlass/device_kernel.h" #include "cutlass/reduction/kernel/tensor_reduce_affine_strided.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace reduction { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Tensor reduction operator on layouts which are affine template < int Rank, ///< Rank of source tensor (e.g. NDHWC => 5) int ReducedRank, ///< Rank of reduced tensor (includes contiguous, e.g. 
NC => 2) typename ElementOutput_, typename ElementSource_, typename ReductionOp_, int VectorLength = 1, typename ElementCompute_ = ElementOutput_, int Threads = 256, ///< Number of participating threads int BatchSize = 4 ///< Number of elements to load per batch > struct TensorReductionAffineStrided { static int const kRank = Rank; static int const kReducedRank = ReducedRank; static int const kVectorLength = VectorLength; static int const kInnerRank = kRank - kReducedRank; static int const kThreads = Threads; static int const kBatchSize = BatchSize; using ElementOutput = ElementOutput_; using ElementSource = ElementSource_; using ReductionOp = ReductionOp_; using ElementCompute = ElementCompute_; // // Data members // /// Internal status field Status status; /// Extent of tensor in source layout Coord<kRank> extent; /// Number of points in the outer index space int64_t outer_count; /// Number of elements in the inner index space int64_t inner_count; /// Number of workspaces needed int workspace_count; /// CUDA Grid shape (.x => contiguous, .y => outer, .z => inner) dim3 grid_shape; /// CUDA Threadblock shape (.x => contiguous, .y => outer, .z => inner) dim3 threadblock_shape; /// CUDA grid shape for the final reduction step if needed dim3 grid_final; /// CUDA threadblock shape for the final reduction step if needed dim3 threadblock_final; private: // // Methods // /// Helper to reshape 'count' such that it is less than 2 x 'ext' static int reshape_pow2(int ext, int count) { if (ext > count) { return 1; } int x = 1; for (; count >= ext * 2; ) { count >>= 1; x <<= 1; } return x; } public: /// Default ctor TensorReductionAffineStrided(): status(Status::kErrorInvalidProblem), extent(), outer_count(0), inner_count(0), workspace_count(0), grid_shape(0, 0, 0), threadblock_shape(0, 0, 0) { } /// Constructor TensorReductionAffineStrided( Coord<kRank> extent_, int target_threadblock_count = 128 ): status(Status::kSuccess), extent(extent_), outer_count(0), inner_count(0), workspace_count(0) { // // Plan the parallel mapping strategy. // outer_count = 1; inner_count = 1; // Compute number of elements in strided ranks for (int p = 0; p < kReducedRank - 1; ++p) { outer_count *= extent[p]; } for (int p = 0; p < kInnerRank; ++p) { inner_count *= extent[kReducedRank + p - 1]; } // Compute plan for the reduction int extent_c = extent[kRank - 1]; int vectors_c = (extent_c -1 + kVectorLength) / kVectorLength; // Determine CTA shape int cta_width = kThreads * kVectorLength; int cta_ways = reshape_pow2(extent_c, cta_width); int cta_threads_x = kThreads / cta_ways; threadblock_shape = dim3(cta_threads_x, 1, std::min(cta_ways, 64)); // This leads to an error. if (threadblock_shape.z > 1) { if (threadblock_shape.y != 1) { status = Status::kErrorInternal; return; } } // Determine grid shape int cta_count_x = (vectors_c + cta_threads_x - 1) / cta_threads_x; int cta_count_y = std::max(1, target_threadblock_count / cta_count_x); // Limit the number of CTAs assigned to outer dimension if (int64_t(cta_count_y * threadblock_shape.y) > outer_count) { cta_count_y = int(outer_count + threadblock_shape.y - 1) / threadblock_shape.y; } // Limit the number of CTAs assigned to inner dimension int cta_count_z = std::max(1, target_threadblock_count / cta_count_y); if (int64_t(cta_count_z * threadblock_shape.z) > inner_count) { cta_count_z = int(inner_count + threadblock_shape.z - 1) / threadblock_shape.z; } grid_shape = dim3(cta_count_x, cta_count_y, cta_count_z); workspace_count = (cta_count_z > 1 ? 
cta_count_z : 0); // Determine shape of final reduction kernel if needed grid_final = dim3(cta_count_x, int(outer_count)); threadblock_final = dim3(cta_threads_x, 1, 1); } /// Simple check to verify the object is initialized correctly bool good() const { return status == Status::kSuccess; } /// Size of one CTA's workspace int64_t workspace_stride() const { // Error condition if (!good()) { return 0; } int vector_size_bytes = kVectorLength * sizeof_bits<ElementCompute>::value / 8; return extent[kRank - 1] * vector_size_bytes; } /// Returns the size (in bytes) of a temporary workspace needed for reduction across CTAs int64_t workspace_size() const { // Error condition if (!good()) { return 0; } // No reduction across CTAs if (grid_shape.z == 1) { return 0; } return workspace_stride() * outer_count * grid_shape.z; } /// Performs a reduction Status reduce( ElementOutput *dst_ptr, ///< Pointer to destination tensor int64_t dst_stride[], ///< Stride vector (of length kReducedRank - 1) ElementSource const *src_ptr, ///< Pointer to source tensor int64_t src_stride[], ///< Stride vector (of length kRank - 1) void *device_workspace_ptr = nullptr, ///< Device workspace ElementCompute reduction_identity = ElementCompute(), ///< Reduciton identity ReductionOp reduction_op = ReductionOp(), ///< Reduction operator cudaStream_t stream = nullptr) { ///< CUDA Stream into which all kernels are launched // Initial status check if (!good()) { return status; } // Guard against null workspace if (workspace_count > 1 && device_workspace_ptr == nullptr) { return Status::kErrorWorkspaceNull; } // Define reduction kernel using ReductionKernel = kernel::TensorReductionAffineStrided< kRank, kReducedRank, ElementOutput, ElementSource, ReductionOp, kVectorLength, ElementCompute, kThreads>; using FinalReductionKernel = kernel::TensorReductionAffineStridedFinal< kRank, kReducedRank, ElementOutput, ElementSource, ReductionOp, kVectorLength, ElementCompute, kThreads>; using Params = typename ReductionKernel::Params; // Construct the parameters Params params( extent, dst_ptr, dst_stride, src_ptr, src_stride, static_cast<ElementCompute *>(device_workspace_ptr), workspace_stride(), workspace_count, reduction_op, reduction_identity); // Shared memory size int shared_mem_bytes = sizeof(typename ReductionKernel::SharedStorage); // Launch the kernel Kernel<ReductionKernel><<< grid_shape, threadblock_shape, shared_mem_bytes, stream >>>(params); // Check error condition if (cudaPeekAtLastError() == cudaSuccess) { status = Status::kSuccess; } else { status = Status::kErrorInternal; } // Final reduction kernel if (workspace_count) { Kernel<FinalReductionKernel><<< grid_final, threadblock_final, 0, stream >>>(params); // Check error condition if (cudaPeekAtLastError() == cudaSuccess) { status = Status::kSuccess; } else { status = Status::kErrorInternal; } } return status; } /// Helper to use overloaded function call operator Status operator()( ElementOutput *dst_ptr, ///< Pointer to destination tensor int64_t dst_stride[], ///< Stride vector (of length kReducedRank - 1) ElementSource const *src_ptr, ///< Pointer to source tensor int64_t src_stride[], ///< Stride vector (of length kRank - 1) void *device_workspace_ptr = nullptr, ///< Pointer to device workspace ElementCompute reduction_identity = ElementCompute(), ///< Reduciton identity ReductionOp reduction_op = ReductionOp(), ///< Reduction operator cudaStream_t stream = nullptr) { ///< CUDA Stream into which all kernels are launched return reduce( dst_ptr, dst_stride, src_ptr, 
src_stride, device_workspace_ptr, reduction_identity, reduction_op, stream); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace reduction } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/reduction/device/tensor_reduce_affine_strided.h/0
{ "file_path": "cutlass/include/cutlass/reduction/device/tensor_reduce_affine_strided.h", "repo_id": "cutlass", "token_count": 4123 }
36
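For orientation, here is a hypothetical sketch of how this device-level reduction might be driven: reducing a packed NHWC tensor over its H and W ranks into an NC tensor, with `cutlass::plus` from `cutlass/functional.h` as the reduction functor. The stride convention used here (strides of the leading ranks, contiguous rank omitted) is my reading of the `reduce()` parameters rather than an excerpt from the library's tests; compile with nvcc.

```cpp
#include <cstdint>
#include <cuda_runtime.h>
#include "cutlass/coord.h"
#include "cutlass/functional.h"
#include "cutlass/reduction/device/tensor_reduce_affine_strided.h"

int main() {
  int N = 8, H = 16, W = 16, C = 64;

  // Rank-4 source (NHWC), rank-2 reduced tensor (NC).
  using TensorReduction = cutlass::reduction::device::TensorReductionAffineStrided<
      4, 2, float, float, cutlass::plus<float>>;

  TensorReduction reduction(cutlass::make_Coord(N, H, W, C));
  if (!reduction.good()) {
    return 1;
  }

  float *src = nullptr, *dst = nullptr;
  uint8_t *workspace = nullptr;
  cudaMalloc(&src, sizeof(float) * N * H * W * C);
  cudaMalloc(&dst, sizeof(float) * N * C);
  cudaMalloc(&workspace, reduction.workspace_size());

  // Strides of the leading ranks, assuming fully packed layouts.
  int64_t src_stride[] = { int64_t(H) * W * C, int64_t(W) * C, C };
  int64_t dst_stride[] = { C };

  cutlass::Status status =
      reduction.reduce(dst, dst_stride, src, src_stride, workspace, /*identity=*/0.0f);

  return status == cutlass::Status::kSuccess ? 0 : 1;
}
```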
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Defines a proxy class for storing Tensor Float 32 data type. 
*/ #pragma once #if defined(__CUDACC_RTC__) #include "cutlass/floating_point_nvrtc.h" #else #include <cmath> #include <limits> #include <cstdint> #endif #include "cutlass/cutlass.h" namespace cutlass { /////////////////////////////////////////////////////////////////////////////////////////////////// /// Tensor Float 32 data type struct alignas(4) tfloat32_t { // // Data members // /// Storage type uint32_t storage; // // Methods // /// Constructs from an unsigned int CUTLASS_HOST_DEVICE static tfloat32_t bitcast(uint32_t x) { tfloat32_t h; h.storage = x; return h; } /// Emulated rounding is fast in device code CUTLASS_HOST_DEVICE static tfloat32_t round_half_ulp_truncate(float const &s) { uint32_t x = reinterpret_cast<uint32_t const &>(s); #if defined(__CUDA_ARCH__) if (::isfinite(s)) { x += 0x1000u; } #else if (std::isfinite(s)) { x += 0x1000u; } #endif return tfloat32_t::bitcast(x); } /// Default constructor tfloat32_t() = default; /// Floating-point conversion - round toward nearest even CUTLASS_HOST_DEVICE // explicit tfloat32_t(float x): storage(round_half_ulp_truncate(x).storage) { } tfloat32_t(float x): storage(round_half_ulp_truncate(x).storage) { } /// Floating-point conversion - round toward nearest even CUTLASS_HOST_DEVICE // explicit tfloat32_t(double x): tfloat32_t(float(x)) { tfloat32_t(double x): tfloat32_t(float(x)) { } /// Integer conversion - round toward zero CUTLASS_HOST_DEVICE // explicit tfloat32_t(int x) { tfloat32_t(int x) { float flt = static_cast<float>(x); #if defined(__CUDA_ARCH__) storage = reinterpret_cast<uint32_t const &>(flt); #else std::memcpy(&storage, &flt, sizeof(storage)); #endif } /// Converts to float CUTLASS_HOST_DEVICE operator float() const { // Conversions to IEEE single-precision requires clearing dont-care bits // of the mantissa. 
unsigned bits = (storage & ~0x1fffu); #if defined(__CUDA_ARCH__) return reinterpret_cast<float const &>(bits); #else float flt; std::memcpy(&flt, &bits, sizeof(flt)); return flt; #endif } /// Converts to float CUTLASS_HOST_DEVICE explicit operator double() const { return double(float(*this)); } /// Converts to int CUTLASS_HOST_DEVICE explicit operator int() const { return int(float(*this)); } /// Casts to bool CUTLASS_HOST_DEVICE explicit operator bool() const { return (float(*this) != 0.0f); } /// Obtains raw bits CUTLASS_HOST_DEVICE uint32_t raw() const { return storage; } /// Returns the sign bit CUTLASS_HOST_DEVICE bool signbit() const { return ((raw() & 0x80000000) != 0); } /// Returns the biased exponent CUTLASS_HOST_DEVICE int exponent_biased() const { return int((raw() >> 23) & 0x0ff); } /// Returns the unbiased exponent CUTLASS_HOST_DEVICE int exponent() const { return exponent_biased() - 127; } /// Returns the mantissa CUTLASS_HOST_DEVICE int mantissa() const { return int(raw() & 0x7fffff); } }; /////////////////////////////////////////////////////////////////////////////////////////////////// CUTLASS_HOST_DEVICE bool signbit(cutlass::tfloat32_t const& h) { return h.signbit(); } CUTLASS_HOST_DEVICE cutlass::tfloat32_t abs(cutlass::tfloat32_t const& h) { return cutlass::tfloat32_t::bitcast(h.raw() & 0x7fffffff); } CUTLASS_HOST_DEVICE bool isnan(cutlass::tfloat32_t const& h) { return (h.exponent_biased() == 0x0ff) && h.mantissa(); } CUTLASS_HOST_DEVICE bool isfinite(cutlass::tfloat32_t const& h) { return (h.exponent_biased() != 0x0ff); } CUTLASS_HOST_DEVICE cutlass::tfloat32_t nan_tf32(const char*) { // NVIDIA canonical NaN return cutlass::tfloat32_t::bitcast(0x7fffffff); } CUTLASS_HOST_DEVICE bool isinf(cutlass::tfloat32_t const& h) { return (h.exponent_biased() == 0x0ff) && !h.mantissa(); } CUTLASS_HOST_DEVICE bool isnormal(cutlass::tfloat32_t const& h) { return h.exponent_biased() && h.exponent_biased() != 0x0ff; } CUTLASS_HOST_DEVICE int fpclassify(cutlass::tfloat32_t const& h) { int exp = h.exponent_biased(); int mantissa = h.mantissa(); if (exp == 0x0ff) { if (mantissa) { return FP_NAN; } else { return FP_INFINITE; } } else if (!exp) { if (mantissa) { return FP_SUBNORMAL; } else { return FP_ZERO; } } return FP_NORMAL; } CUTLASS_HOST_DEVICE cutlass::tfloat32_t sqrt(cutlass::tfloat32_t const& h) { #if defined(__CUDACC_RTC__) return cutlass::tfloat32_t(sqrtf(float(h))); #else return cutlass::tfloat32_t(std::sqrt(float(h))); #endif } CUTLASS_HOST_DEVICE tfloat32_t copysign(tfloat32_t const& a, tfloat32_t const& b) { uint32_t a_mag = (reinterpret_cast<uint32_t const &>(a) & 0x7fffffff); uint32_t b_sign = (reinterpret_cast<uint32_t const &>(b) & 0x80000000); uint32_t result = (a_mag | b_sign); return reinterpret_cast<tfloat32_t const &>(result); } /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////// // // Standard Library operations and definitions // /////////////////////////////////////////////////////////////////////////////////////////////////// namespace std { #if !defined(__CUDACC_RTC__) /// Numeric limits template <> struct numeric_limits<cutlass::tfloat32_t> { static bool const is_specialized = true; static bool const is_signed = true; static bool const is_integer = false; static bool const is_exact = false; static bool const has_infinity = true; static bool const has_quiet_NaN = true; static bool const 
has_signaling_NaN = false; static std::float_denorm_style const has_denorm = std::denorm_present; static bool const has_denorm_loss = true; static std::float_round_style const round_style = std::round_to_nearest; static bool const is_iec559 = false; static bool const is_bounded = true; static bool const is_modulo = false; static int const digits = 19; /// Least positive value static cutlass::tfloat32_t min() { return cutlass::tfloat32_t::bitcast(0x01); } /// Minimum finite value static cutlass::tfloat32_t lowest() { return cutlass::tfloat32_t::bitcast(0xff7fffff); } /// Maximum finite value static cutlass::tfloat32_t max() { return cutlass::tfloat32_t::bitcast(0x7f7fffff); } /// Returns smallest finite value static cutlass::tfloat32_t epsilon() { return cutlass::tfloat32_t::bitcast(0x1000); } /// Returns smallest finite value static cutlass::tfloat32_t round_error() { return cutlass::tfloat32_t(0.5f); } /// Returns smallest finite value static cutlass::tfloat32_t infinity() { return cutlass::tfloat32_t::bitcast(0x7f800000); } /// Returns smallest finite value static cutlass::tfloat32_t quiet_NaN() { return cutlass::tfloat32_t::bitcast(0x7fffffff); } /// Returns smallest finite value static cutlass::tfloat32_t signaling_NaN() { return cutlass::tfloat32_t::bitcast(0x7fffffff); } /// Returns smallest finite value static cutlass::tfloat32_t denorm_min() { return cutlass::tfloat32_t::bitcast(0x1); } }; #endif /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace std /////////////////////////////////////////////////////////////////////////////////////////////////// // // Arithmetic operators // /////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { /////////////////////////////////////////////////////////////////////////////////////////////////// CUTLASS_HOST_DEVICE bool operator==(tfloat32_t const& lhs, tfloat32_t const& rhs) { return float(lhs) == float(rhs); } CUTLASS_HOST_DEVICE bool operator!=(tfloat32_t const& lhs, tfloat32_t const& rhs) { return float(lhs) != float(rhs); } CUTLASS_HOST_DEVICE bool operator<(tfloat32_t const& lhs, tfloat32_t const& rhs) { return float(lhs) < float(rhs); } CUTLASS_HOST_DEVICE bool operator<=(tfloat32_t const& lhs, tfloat32_t const& rhs) { return float(lhs) <= float(rhs); } CUTLASS_HOST_DEVICE bool operator>(tfloat32_t const& lhs, tfloat32_t const& rhs) { return float(lhs) > float(rhs); } CUTLASS_HOST_DEVICE bool operator>=(tfloat32_t const& lhs, tfloat32_t const& rhs) { return float(lhs) >= float(rhs); } CUTLASS_HOST_DEVICE tfloat32_t operator+(tfloat32_t const& lhs, tfloat32_t const& rhs) { return tfloat32_t(float(lhs) + float(rhs)); } CUTLASS_HOST_DEVICE tfloat32_t operator-(tfloat32_t const& lhs) { union u_tff32 { float val_f32; tfloat32_t val_tf; CUTLASS_HOST_DEVICE u_tff32() : val_f32(0) { } }; union u_tff32 x; x.val_f32 = -reinterpret_cast<float const &>(lhs); return x.val_tf; } CUTLASS_HOST_DEVICE tfloat32_t operator-(tfloat32_t const& lhs, tfloat32_t const& rhs) { return tfloat32_t(float(lhs) - float(rhs)); } CUTLASS_HOST_DEVICE tfloat32_t operator*(tfloat32_t const& lhs, tfloat32_t const& rhs) { return tfloat32_t(float(lhs) * float(rhs)); } CUTLASS_HOST_DEVICE tfloat32_t operator/(tfloat32_t const& lhs, tfloat32_t const& rhs) { return tfloat32_t(float(lhs) / float(rhs)); } CUTLASS_HOST_DEVICE tfloat32_t& operator+=(tfloat32_t & lhs, tfloat32_t const& rhs) { lhs = tfloat32_t(float(lhs) + float(rhs)); return lhs; } 
CUTLASS_HOST_DEVICE tfloat32_t& operator-=(tfloat32_t & lhs, tfloat32_t const& rhs) { lhs = tfloat32_t(float(lhs) - float(rhs)); return lhs; } CUTLASS_HOST_DEVICE tfloat32_t& operator*=(tfloat32_t & lhs, tfloat32_t const& rhs) { lhs = tfloat32_t(float(lhs) * float(rhs)); return lhs; } CUTLASS_HOST_DEVICE tfloat32_t& operator/=(tfloat32_t & lhs, tfloat32_t const& rhs) { lhs = tfloat32_t(float(lhs) / float(rhs)); return lhs; } CUTLASS_HOST_DEVICE tfloat32_t& operator++(tfloat32_t & lhs) { float tmp(lhs); ++tmp; lhs = tfloat32_t(tmp); return lhs; } CUTLASS_HOST_DEVICE tfloat32_t& operator--(tfloat32_t & lhs) { float tmp(lhs); --tmp; lhs = tfloat32_t(tmp); return lhs; } CUTLASS_HOST_DEVICE tfloat32_t operator++(tfloat32_t & lhs, int) { tfloat32_t ret(lhs); float tmp(lhs); tmp++; lhs = tfloat32_t(tmp); return ret; } CUTLASS_HOST_DEVICE tfloat32_t operator--(tfloat32_t & lhs, int) { tfloat32_t ret(lhs); float tmp(lhs); tmp--; lhs = tfloat32_t(tmp); return ret; } /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////// // // User-defined literals // CUTLASS_HOST_DEVICE cutlass::tfloat32_t operator "" _tf32(long double x) { return cutlass::tfloat32_t(float(x)); } CUTLASS_HOST_DEVICE cutlass::tfloat32_t operator "" _tf32(unsigned long long int x) { return cutlass::tfloat32_t(int(x)); } /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/tfloat32.h/0
{ "file_path": "cutlass/include/cutlass/tfloat32.h", "repo_id": "cutlass", "token_count": 4661 }
37
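A small host-side example may help illustrate the rounding and truncation behavior defined above; the specific input value is chosen only because it sits below tf32 precision and is not taken from the header.

```cpp
#include <cstdio>
#include "cutlass/tfloat32.h"

int main() {
  // 1 + 2^-12 is representable in fp32 but falls below the 10-bit tf32 mantissa.
  float x = 1.0f + 1.0f / 4096.0f;

  // Construction applies round_half_ulp_truncate(); converting back to float
  // clears the 13 "don't care" mantissa bits, so this prints 1.0 for the tf32 value.
  cutlass::tfloat32_t t = x;
  printf("fp32: %.8f  tf32: %.8f  raw: 0x%08x\n", x, float(t), unsigned(t.raw()));

  // Arithmetic is carried out in fp32 and re-rounded on construction of the result.
  cutlass::tfloat32_t s = t * cutlass::tfloat32_t(2.0f);
  printf("doubled: %.8f\n", float(s));
  return 0;
}
```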
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief This defines a "fragment" iterator for visiting the fragments of a warp vector that participate in one warp-level mma operation. Typically, this is used to access the scale/bias fragement of a warp-level mma operation. The scale/bias vector is then partitioned into smaller fragments that can be fed into next warp-level mma operation. This iterator is necessary to accomplish warp-level mma fusion where the scale/bias vector is applied to the multiplicand for the next mma. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/matrix_shape.h" #include "cutlass/layout/matrix.h" #include "cutlass/layout/tensor.h" #include "cutlass/numeric_conversion.h" namespace cutlass { namespace transform { namespace warp { //////////////////////////////////////////////////////////////////////////////// template < /// Size of the input fragment tile shape (concept: MatrixShape) typename Shape_, /// Element type typename Element_, /// Layout of operand in memory typename Layout_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, //// Number of elements per access when loading fragment int ElementsPerAccess> class VectorFragmentIterator; // Partial specialization for PitchLinear layout tile template < /// Size of the input fragment vector shape (concept: MatrixShape) typename Shape_, /// Element type typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, //// Number of elements per access when loading fragment int ElementsPerAccess> class VectorFragmentIterator<Shape_, Element_, cutlass::layout::PitchLinear, InstructionShape_, ElementsPerAccess> { public: /// Size of the input threadblock tile shape (concept: MatrixShape) using Shape = Shape_; /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::PitchLinear; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Number of participating threads static int const kThreads = 32; static int const kElementsPerAccess = ElementsPerAccess; static int const kRowsPerIteration = 8; static int const kColumnsPerAccess = 8; static int const kElementsPerIteration = kRowsPerIteration * InstructionShape::kK / kThreads; static int const kAccessPerIteration = kElementsPerIteration / kElementsPerAccess; /// Number of iterations using Iterations = MatrixShape<InstructionShape::kM / kRowsPerIteration, Shape::kContiguous / kElementsPerIteration>; public: // // Derived quantities // // All fragments have kElementsPerAccess scale followed by bias /// Fragment object holding a thread's part of a tile /// This is the fragment size produced by one iteration of the iterator. 
using Fragment = Array<Element, kElementsPerIteration * Iterations::kRow>; /// Input threadblock fragment tile using ThreadblockFragment = Array<Element, Shape::kContiguous >; private: /// Internal access type using AccessType = Array<Element, kElementsPerAccess>; private: // // Data members // /// Input threadblock fragment tile AccessType const *iterator_; /// Internal index int index_; public: /// Constructs an iterator CUTLASS_HOST_DEVICE VectorFragmentIterator(ThreadblockFragment const &threadblock_frag) : iterator_(reinterpret_cast<AccessType const *>(&threadblock_frag)), index_(0) {} /// Add offset CUTLASS_HOST_DEVICE void add_offset(int index_offset) { index_ += index_offset; if(index_ >= Iterations::kColumn) index_ = 0; } /// Increments CUTLASS_HOST_DEVICE VectorFragmentIterator &operator++() { add_offset(1); return *this; } CUTLASS_HOST_DEVICE void set_index(int idx) { index_ = idx; } /// Loads a fragment from the referenced part of the accumulator tile CUTLASS_HOST_DEVICE void load(Fragment &frag) const { AccessType *frag_ptr = reinterpret_cast<AccessType *>(&frag); CUTLASS_PRAGMA_UNROLL for (int r = 0; r < Iterations::kRow; r++) { CUTLASS_PRAGMA_UNROLL for (int i = 0; i < kAccessPerIteration; i++) { frag_ptr[i * Iterations::kRow + r].clear(); frag_ptr[i * Iterations::kRow + r] = iterator_[index_ * kAccessPerIteration + i]; } } } }; // Partial specialization for Row-Major layout tile template < /// Size of the input fragment tile shape (concept: MatrixShape) typename Shape_, /// Element type typename Element_, /// Shape of one matrix product operation (concept: MatrixShape) typename InstructionShape_, //// Number of elements per access when loading fragment int ElementsPerAccess> class VectorFragmentIterator<Shape_, Element_, cutlass::layout::RowMajor, InstructionShape_, ElementsPerAccess> { public: /// Size of the input threadblock tile shape (concept: MatrixShape) using Shape = Shape_; /// Element type using Element = Element_; /// Layout of source tile using Layout = cutlass::layout::RowMajor; /// Shape of one matrix product operation (concept: MatrixShape) using InstructionShape = InstructionShape_; /// Underlying iterator using Base = VectorFragmentIterator< layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element, layout::PitchLinear, InstructionShape, ElementsPerAccess>; public: // // Derived quantities // /// Fragment object holding a thread's part of a tile /// This is the fragment size produced by one iteration of the iterator. using Fragment = typename Base::Fragment; /// Input threadblock fragment tile using ThreadblockFragment = typename Base::ThreadblockFragment; private: /// Underlying iterator Base iterator_; public: /// Constructs an iterator CUTLASS_HOST_DEVICE VectorFragmentIterator(ThreadblockFragment const &threadblock_frag) : iterator_(threadblock_frag) {} /// Add offset CUTLASS_HOST_DEVICE void add_offset(int index_offset) { iterator_.add_offset(index_offset); } /// Increments CUTLASS_HOST_DEVICE VectorFragmentIterator &operator++() { add_offset(1); return *this; } CUTLASS_HOST_DEVICE void set_index(int idx) { iterator_.set_index(idx); } /// Loads a fragment from the referenced part of the accumulator tile CUTLASS_HOST_DEVICE void load(Fragment &frag) const { iterator_.load(frag); } }; //////////////////////////////////////////////////////////////////////////////// } // namespace warp } // namespace conv } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/transform/warp/vector_fragment_iterator.h/0
{ "file_path": "cutlass/include/cutlass/transform/warp/vector_fragment_iterator.h", "repo_id": "cutlass", "token_count": 2738 }
38
# CuTe TMA Tensors Along your travels, you may find strange looking CuTe Tensors that are printed as something like ``` ArithTuple(0,_0,_0,_0) o ((_128,_64),2,3,1):((_1@0,_1@1),_64@1,_1@2,_1@3) ``` What is an `ArithTuple`? Are those tensor strides? What do those mean? What is this for? This documentation intends to answer those questions and introduce some of the more advanced features of CuTe. # Introduction to TMA instructions The Tensor Memory Accelerator (TMA) is a set of instructions for copying possibly multidimensional arrays between global and shared memory. TMA was introduced in the Hopper architecture. A single TMA instruction can copy an entire tile of data all at once. As a result, the hardware no longer needs to compute individual memory addresses and issue a separate copy instruction for each element of the tile. To accomplish this, the TMA instruction is given a *TMA descriptor*, which is a packed representation of a multidimensional tensor in global memory with 1, 2, 3, 4, or 5 dimensions. The TMA descriptor holds * the base pointer of the tensor; * the data type of the tensor's elements (e.g., `int`, `float`, `double`, or `half`); * the size of each dimension; * the stride within each dimension; and * other flags representing the smem box size, smem swizzling patterns, and out-of-bounds access behavior. This descriptor must be created on the host before kernel execution. It is shared between all thread blocks that will be issuing TMA instructions. Once inside the kernel, the TMA is executed with the following parameters: * pointer to the TMA descriptor; * pointer to the SMEM; and * coordinates into the GMEM tensor represented within the TMA descriptor. For example, the interface for TMA-store with 3-D coordinates looks like this. ```cpp struct SM90_TMA_STORE_3D { CUTE_DEVICE static void copy(void const* const desc_ptr, void const* const smem_ptr, int32_t const& crd0, int32_t const& crd1, int32_t const& crd2) { // ... invoke CUDA PTX instruction ... } }; ``` We observe that the TMA instruction does not directly consume pointers to global memory. Indeed, the global memory pointer is contained in the descriptor, is considered constant, and is NOT a separate parameter to the TMA instruction. Instead, the TMA consumes TMA coordinates into the TMA's view of global memory that is defined in the TMA descriptor. That means that an ordinary CuTe Tensor that stores a GMEM pointer and computes offsets and new GMEM pointers is useless to the TMA. What do we do? # Building a TMA Tensor ## Implicit CuTe Tensors All CuTe Tensors are compositions of Layouts and Iterators. An ordinary global memory tensor's iterator is its global memory pointer. However, a CuTe Tensor's iterator doesn't have to be a pointer; it can be any random-access iterator. One example of such an iterator is a *counting iterator*. This represents a possibly infinite sequence of integers that starts at some value. We call the members of this sequence *implicit integers*, because the sequence is not explicitly stored in memory. The iterator just stores its current value. We can use a counting iterator to create a tensor of implicit integers, ```cpp Tensor A = make_tensor(counting_iterator<int>(42), make_shape(4,5)); print_tensor(A); ``` which outputs ``` counting_iter(42) o (4,5):(_1,4): 42 46 50 54 58 43 47 51 55 59 44 48 52 56 60 45 49 53 57 61 ``` This tensor maps logical coordinates to on-the-fly computed integers. 
Because it's still a CuTe Tensor, it can still be tiled and partitioned and sliced just like a normal tensor by accumulating integer offsets into the iterator. But the TMA doesn't consume pointers or integers, it consumes coordinates. Can we make a tensor of implicit TMA coordinates for the TMA instruction to consume? If so, then we could presumably also tile and partition and slice that tensor of coordinates so that we would always have the right TMA coordinate to give to the instruction. ## ArithTupleIterators and ArithTuples First, we build a `counting_iterator` equivalent for TMA coordinates. It should support * dereference to a TMA coordinate, and * offset by another TMA coordinate. We'll call this an `ArithmeticTupleIterator`. It stores a coordinate (a tuple of integers) that is represented as an `ArithmeticTuple`. The `ArithmeticTuple` is simply a (public subclass of) `cute::tuple` that has an overloaded `operator+` so that it can be offset by another tuple. The sum of two tuples is the tuple of the sum of the elements. Now similar to `counting_iterator<int>(42)` we can create an implicit "iterator" (but without increment or other common iterator operations) over tuples that can be dereferenced and offset by other tuples ```cpp ArithmeticTupleIterator citer_1 = make_inttuple_iter(42, Int<2>{}, Int<7>{}); ArithmeticTupleIterator citer_2 = citer_1 + make_tuple(Int<0>{}, 5, Int<2>{}); print(*citer_2); ``` which outputs ``` (42,7,_9) ``` A TMA Tensor can use an iterator like this to store the current TMA coordinate "offset". The "offset" here is in quotes because it's clearly not a normal 1-D array offset or pointer. In summary, one creates a TMA descriptor for the *whole global memory tensor*. The TMA descriptor defines a view into that tensor and the instruction takes TMA coordinates into that view. In order to generate and track those TMA coordinates, we define an implicit CuTe Tensor of TMA coordinates that can be tiled, sliced, and partitioned the exact same way as an ordinary CuTe Tensor. We can now track and offset TMA coordinates with this iterator, but how do we get CuTe Layouts to generate non-integer offsets? ## Strides aren't just integers Ordinary tensors have a layout that maps a logical coordinate `(i,j)` into a 1-D linear index `k`. This mapping is the inner-product of the coordinate with the strides. TMA Tensors hold iterators of TMA coordinates. Thus, a TMA Tensor's Layout must map a logical coordinate to a TMA coordinate, rather than to a 1-D linear index. To do this, we can abstract what a stride is. Strides need not be integers, but rather any algebraic object that supports inner-product with the integers (the logical coordinate). The obvious choice is the `ArithmeticTuple` we used earlier since they can be added to each other, but this time additionally equipped with an `operator*` so it can also be scaled by an integer. ### Aside: Integer-module strides A group of objects that support addition between elements and product between elements and integers is called an integer-module. Formally, an integer-module is an abelian group `(M,+)` equipped with `Z*M -> M`, where `Z` are the integers. That is, an integer-module `M` is a group that supports inner products with the integers. The integers are an integer-module. Rank-R tuples of integers are an integer-module. In principle, layout strides may be any integer-module. ### Basis elements CuTe's basis elements live in the header file `cute/numeric/arithmetic_tuple.hpp`. 
To make it easy to create `ArithmeticTuple`s that can be used as strides, CuTe defines normalized basis elements using the `E` type alias. "Normalized" means that the scaling factor of the basis element is the compile-time integer 1. | C++ object | Description | String representation | | --- | --- | --- | | `E<>{}` | `1` | `1` | | `E<0>{}` | `(1,0,...)` | `1@0` | | `E<1>{}` | `(0,1,0,...)` | `1@1` | | `E<0,1>{}` | `((0,1,0,...),0,...)` | `1@1@0` | | `E<1,0>{}` | `(0,(1,0,...),0,...)` | `1@0@1` | The "description" column in the above table interprets each basis element as an infinite tuple of integers, where all the tuple's entries not specified by the element's type are zero. We count tuple entries from left to right, starting with zero. For example, `E<1>{}` has a 1 in position 1: `(0,1,0,...)`. `E<3>{}` has a 1 in position 3: `(0,0,0,1,0,...)`. Basis elements can be *nested*. For instance, in the above table, `E<0,1>{}` means that in position 0 there is a `E<1>{}`: `((0,1,0,...),0,...)`. Basis elements can be *scaled*. That is, they can be multiplied by an integer *scaling factor*. For example, in `5*E<1>{}`, the scaling factor is `5`. `5*E<1>{}` prints as `5@1` and means `(0,5,0,...)`. The scaling factor commutes through any nesting. For instance, `5*E<0,1>{}` prints as `5@1@0` and means `((0,5,0,...),0,...)`. Basis elements can also be added together, as long as their hierarchical structures are compatible. For example, `3*E<0>{} + 4*E<1>{}` results in `(3,4,0,...)`. Intuitively, "compatible" means that the nested structure of the two basis elements matches well enough to add the two elements together. ### Linear combinations of strides Layouts work by taking the inner product of the natural coordinate with their strides. For strides made of integer elements, e.g., `(1,100)`, the inner product of the input coordinate `(i,j)` and the stride is `i + 100j`. Offsetting an "ordinary" tensor's pointer and this index gives the pointer to the tensor element at `(i,j)`. For strides of basis elements, we still compute the inner product of the natural coordinate with the strides. For example, if the stride is `(1@0,1@1)`, then the inner product of the input coordinate `(i,j)` with the strides is `i@0 + j@1 = (i,j)`. That translates into the (TMA) coordinate `(i,j)`. If we wanted to reverse the coordinates, then we could use `(1@1,1@0)` as the stride. Evaluating the layout would give `i@1 + j@0 = (j,i)`. A linear combination of basis elements can be interpreted as a possibly multidimensional and hierarchical coordinate. For instance, `2*2@1@0 + 3*1@1 + 4*5@1 + 7*1@0@0` means `((0,4,...),0,...) + (0,3,0,...) + (0,20,0,...) + ((7,...),...) = ((7,4,...),23,...)` and can be interpreted as the coordinate `((7,4),23)`. Thus, linear combinations of these strides can be used to generate TMA coordinates. These coordinates, in turn, can be used to offset TMA coordinate iterators. ## Application to TMA Tensors Now we can build CuTe Tensors like the one seen in the introduction. 
```cpp Tensor a = make_tensor(make_inttuple_iter(0,0), make_shape ( 4, 5), make_stride(E<0>{}, E<1>{})); print_tensor(a); Tensor b = make_tensor(make_inttuple_iter(0,0), make_shape ( 4, 5), make_stride(E<1>{}, E<0>{})); print_tensor(b); ``` prints ``` ArithTuple(0,0) o (4,5):(_1@0,_1@1): (0,0) (0,1) (0,2) (0,3) (0,4) (1,0) (1,1) (1,2) (1,3) (1,4) (2,0) (2,1) (2,2) (2,3) (2,4) (3,0) (3,1) (3,2) (3,3) (3,4) ArithTuple(0,0) o (4,5):(_1@1,_1@0): (0,0) (1,0) (2,0) (3,0) (4,0) (0,1) (1,1) (2,1) (3,1) (4,1) (0,2) (1,2) (2,2) (3,2) (4,2) (0,3) (1,3) (2,3) (3,3) (4,3) ```
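Because `a` and `b` are ordinary CuTe Tensors, the usual slicing and tiling algebra applies to them as well, which is what lets the TMA partitioners hand each thread the coordinates it needs. As a small illustrative sketch (the column index `3` below is an arbitrary choice for demonstration), slicing `b` yields another implicit tensor holding the TMA coordinates of that slice:

```cpp
// Slice column 3 out of b; the iterator is offset by 3*E<0>{} = (3,0)
// and the result is itself an implicit tensor of TMA coordinates.
Tensor b_col = b(_, 3);
print_tensor(b_col);
```

which prints something like

```
ArithTuple(3,0) o (4):(_1@1):
  (3,0)
  (3,1)
  (3,2)
  (3,3)
```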
cutlass/media/docs/cute/0z_tma_tensors.md/0
{ "file_path": "cutlass/media/docs/cute/0z_tma_tensors.md", "repo_id": "cutlass", "token_count": 3694 }
39
![ALT](../images/gemm-hierarchy-with-epilogue-no-labels.png "CUTLASS Terminology") [README](../../README.md#documentation) > **Terminology** # CUTLASS Terminology **cute::Layout**: A `cute::Layout` vocabulary type composed of the hierarchical `cute::Shape` and `cute::Stride` tuples that is used throughout CUTLASS 3.0 to represent and manipulate thread and data layouts. More details are included in the [CuTe specific tensor type documentation](/media/docs/cute/03_tensor.md). **cute::Tensor**: A pointer backed by a `cute::Layout` used to represent a tensor. More details are included in the [CuTe specific tensor type documentation](/media/docs/cute/03_tensor.md). **Capacity**: (scalar) physical number of elements in memory required to store a multidimensional object; expressed as the type's LongIndex type - example: the capacity of a column-major matrix is `lda * N` **Element**: data type describing one item in a multidimensional tensor, array, or matrix **Extent**: (vector-valued quantity) the logical size of each dimension of a multidimensional index space. Consistent with the [C++ Standard Library](https://en.cppreference.com/w/cpp/types/extent). - `Coord<N> extent()` - `Index extent(int dim)` **Fragment**: a register-backed array of elements used to store a thread's part of a tile **Index**: signed integer representing quantities aligned with a logical dimension **Layout**: functor mapping logical coordinates of a tensor to linear offset (as LongIndex); owns stride vectors, if any. **LongIndex**: signed integer representing offsets in memory; typically wider than Index type **Numeric Type**: a CUTLASS data type used to represent real-valued quantities; is trivially copyable. **Pitch Linear**: linear memory allocation obtained from a user-defined 2-D size, which specifies the contiguous and strided dimensions of a tile. **Planar Complex**: representation of complex tensors as two real-valued tensors, with real elements in one part and imaginary elements in another part of identical layout, separated by an offset **Policy**: additional details extending the interface of a template guiding internal implementation; typically used to target specific design points known to be efficient **Rank**: number of dimensions in a multidimensional index space, array, tensor, or matrix. Consistent with [C++ Standard Library](https://en.cppreference.com/w/cpp/types/rank) **Register**: in device code, registers are the most efficient storage for statically sized arrays of elements. Arrays may be expected to be stored in registers if all accesses are made via constexpr indices or within fully unrolled loops. 
**Residue**: partial tile or matrix computation which may require special accommodation for functional correctness or performance **Size**: (scalar) number of logical elements in a tensor; equal to the product of each member of `extent()` - `LongIndex size()` `sizeof_bits<T>::value` - template pattern returning the size of a numeric type or array in units of bits **Storage**: when appropriate, refers to some alternative type used to store a packed collection of elements; may be used to handle bit-level packing or make types safe for use in unions **TensorRef**: contains base pointer and _Layout_ object for referencing infinitely-sized tensor object **TensorView**: contains _TensorRef_ and extent of a finite mathematical object **Tile**: partitions of a tensor that have constant extents and layout known at compile time **Trait**: characteristics of a fully-specialized type, typically used in metaprogramming reflection **View**: an object containing references to a data structure that it does not own; typically, construction of views is lightweight **Warp**: a collection of hardware threads executing in lock-step; warp-level operations typically rely on cooperation among the threads within the warp `AlignedBuffer<T, N>`: statically sized array type; union-safe, no construction guarantee for elements `Array<T, N>`: container for holding numeric types - handles bit packing for small numeric types (e.g. int4_t, uint4_t, bin1_t) `sizeof(Array<T, N>)` - gives expected value in units of bytes with minimum storage of `1 B`: (sizeof_bits<T>::value * N) / 8 **Operator**: an object performing a computation on matrix or tensor objects. May be further refined by scope within the execution model hierarchy. Deprecated starting CUTLASS 3.0, replaced by [MMA and Copy atoms from CuTe](/media/docs/cute/0t_mma_atom.md). **Tile Iterator**: abstraction for accessing and traversing a sequence of tiles in a tensor; CUTLASS specifies [formal concepts for tile iterators](tile_iterator_concept.md). Deprecated starting CUTLASS 3.0. Replaced by `cute::Layout` in equivalent usage scenarios to represent data tensors. **Thread Map**: abstraction for defining how threads are mapped to a given tile. Deprecated starting CUTLASS 3.0. Replaced by `cute::Layout` in equivalent usage scenarios to represent thread tensors. # Copyright Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. SPDX-License-Identifier: BSD-3-Clause ``` Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ```
cutlass/media/docs/terminology.md/0
{ "file_path": "cutlass/media/docs/terminology.md", "repo_id": "cutlass", "token_count": 1752 }
40
################################################################################################# # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Common data types and string names/tags for them """ import enum from cutlass_library import ( ComplexTransform, DataType, DataTypeSize, EpilogueScheduleType, KernelScheduleType, MathOperation, OpcodeClass, TileSchedulerType ) # The following block implements enum.auto() for Python 3.5 variants that don't include it such # as the default 3.5.2 on Ubuntu 16.04. # # https://codereview.stackexchange.com/questions/177309/reimplementing-pythons-enum-auto-for-compatibility try: from enum import auto as enum_auto except ImportError: __cutlass_library_auto_enum = 0 def enum_auto() -> int: global __cutlass_library_auto_enum i = __cutlass_library_auto_enum __cutlass_library_auto_enum += 1 return i class DataTypeSizeBytes: """ Static class to mimic the `DataTypeSize` dictionary, but with checks for whether the data type key is less than a full byte or a non-integer number of bytes. """ @staticmethod def __class_getitem__(datatype): """ Returns the number of bytes in size the data type is. Raises an exception if the data type is either less than a full byte or a non-integer number of bytes in size. :param datatype: data type to query :return: number of bytes the data type occupies :rtype: int """ bits = DataTypeSize[datatype] if bits < 8: raise Exception( f"Data type {datatype} is less than one byte in size." ) elif bits % 8 != 0: raise Exception( f"Data type datatype is not an integer number of bytes." 
) return bits // 8 class SchedulerMode(enum.Enum): Device = enum_auto() Host = enum_auto() SchedulerModeTag = { SchedulerMode.Device: "cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly", SchedulerMode.Host: "cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute", } ShortSchedulerModeNames = {SchedulerMode.Device: "Device", SchedulerMode.Host: "Host"} class FunctionalOp(enum.Enum): AtomicAdd = enum_auto() AtomicMaximum = enum_auto() Divides = enum_auto() Maximum = enum_auto() Minimum = enum_auto() Minus = enum_auto() Multiplies = enum_auto() MultiplyAdd = enum_auto() Plus = enum_auto() FunctionalOpTag = { FunctionalOp.AtomicAdd: "cutlass::atomic_add", FunctionalOp.AtomicMaximum: "cutlass::atomic_maximum", FunctionalOp.Divides: "cutlass::divides", FunctionalOp.Maximum: "cutlass::maximum", FunctionalOp.Minimum: "cutlass::minimum", FunctionalOp.Minus: "cutlass::minus", FunctionalOp.Multiplies: "cutlass::multiplies", FunctionalOp.MultiplyAdd: "cutlass::multiply_add", FunctionalOp.Plus: "cutlass::plus", } class ActivationOp(enum.Enum): DGelu = enum_auto() Gelu = enum_auto() GeluTaylor = enum_auto() HardSwish = enum_auto() Identity = enum_auto() LeakyReLU = enum_auto() ReLU = enum_auto() Sigmoid = enum_auto() SiLU = enum_auto() Tanh = enum_auto() ActivationOpTag = { ActivationOp.DGelu: "cutlass::epilogue::thread::dGELU", ActivationOp.Gelu: "cutlass::epilogue::thread::GELU", ActivationOp.GeluTaylor: "cutlass::epilogue::thread::GELU_taylor", ActivationOp.HardSwish: "cutlass::epilogue::thread::HardSwish", ActivationOp.Identity: "cutlass::epilogue::thread::Identity", ActivationOp.LeakyReLU: "cutlass::epilogue::thread::LeakyReLU", ActivationOp.ReLU: "cutlass::epilogue::thread::ReLu", ActivationOp.Sigmoid: "cutlass::epilogue::thread::Sigmoid", ActivationOp.SiLU: "cutlass::epilogue::thread::SiLu", ActivationOp.Tanh: "cutlass::epilogue::thread::Tanh", } def op_tag(op) -> str: """ Dispatches `op` to the appropriate *Tag dictionary depending on whether `op` is an ActivationOp or FunctionalOp. This is useful for cases in which either type can be used. :param op: operation to emit a tag for :type op: ActivationOp | FunctionalOp :return: tag corresponding to op :rtype: str """ if isinstance(op, ActivationOp): return ActivationOpTag[op] elif isinstance(op, FunctionalOp): return FunctionalOpTag[op] else: raise Exception(f"Unexpected op type {op}. 
Must be one of ActivationOp or FunctionalOp.") class FloatRoundStyle(enum.Enum): ToNearest = enum_auto() ToNearestSatfinite = enum_auto() Indeterminate = enum_auto() TowardZero = enum_auto() TowardInfinity = enum_auto() TowardNegInfinity = enum_auto() HalfUlpTruncDntz = enum_auto() HalfUlpTruncate = enum_auto() FloatRoundStyleTag = { FloatRoundStyle.ToNearest: "cutlass::FloatRoundStyle::round_to_nearest", FloatRoundStyle.ToNearestSatfinite: "cutlass::FloatRoundStyle::round_to_nearest_satfinite", FloatRoundStyle.Indeterminate: "cutlass::FloatRoundStyle::round_indeterminate", FloatRoundStyle.TowardZero: "cutlass::FloatRoundStyle::round_toward_zero", FloatRoundStyle.TowardInfinity: "cutlass::FloatRoundStyle::round_toward_infinity", FloatRoundStyle.TowardNegInfinity: "cutlass::FloatRoundStyle::round_toward_neg_infinity", FloatRoundStyle.HalfUlpTruncDntz: "cutlass::FloatRoundStyle::round_half_ulp_trunc_dntz", FloatRoundStyle.HalfUlpTruncate: "cutlass::FloatRoundStyle::round_half_ulp_truncate", } class MathInstruction: """ Description of a the lowest-level matrix-multiply-accumulate operation to be used in a kernel """ def __init__( self, instruction_shape, element_a, element_b, element_accumulator, opcode_class=OpcodeClass.Simt, math_operation=MathOperation.multiply_add, ): """ :param instruction_shape: size of the [M, N, K] dimensions of the instruction :type instruction_shape: list or tuple :param element_a: data type of operand A :param element_b: data type of operand B :param element_accumulator: data type used in accumulation :param opcode_class: higher-level class of the instruction (e.g., SIMT or Tensor Core) :type opcode_class: cutlass_library.library.OpcodeClass :param math_operation: the type of low-level operation to be performed (e.g., multiply accumulate) :type math_operation: MathOperation """ self.instruction_shape = instruction_shape self.element_a = element_a self.element_b = element_b self.element_accumulator = element_accumulator self.opcode_class = opcode_class self.math_operation = math_operation class TileDescription: """ Description of a tile of computation to be performed in the kernel, encompassing threadblock, cluster, and warp shapes, stage count, and math instruction specification """ def __init__( self, threadblock_shape, stages, warp_count, math_instruction, cluster_shape=[1, 1, 1], kernel_schedule: KernelScheduleType = None, epilogue_schedule: EpilogueScheduleType = None, tile_scheduler: TileSchedulerType = None ): """ :param threadblock_shape: shape of a threadblock tyle :type threadblock_shape: list or tuple :param stages: number of pipline stages in the operation. 
For SM90 kernels, this can be set to `None` and the maximum number of stages that can be supported for an operation on a given architecture will be computed at a later time :type stages: int or None :param warp_count: number of warps in each [M, N, K] dimension of a threadblock tile :type warp_count: list, tuple, or None :param math_instruction: specification of the instruction type and shape to be performed and the types of its operands :type math_instruction: MathInstruction :param cluster_shape: number of threadblocks in the [X, Y, Z] dimensions of a threadblock cluster :param kernel_schedule: type of kernel schedule to use (only available for SM90+) :type kernel_schedule: cutlass_library.KernelScheduleType :param epilogue_schedule: type of epilogue schedule to use (only available for SM90+) :type epilogue_schedule: cutlass_library.EpilogueScheduleType :param tile_scheduler: type of tile scheduler to use (only available for SM90+) :type tile_scheduler: cutlass_library.TileSchedulerType """ if ((kernel_schedule is None and epilogue_schedule is not None) or (kernel_schedule is not None and epilogue_schedule is None)): raise Exception("Kernel and epilogue schedule must either both be Auto or neither be Auto.") self.threadblock_shape = threadblock_shape self.cluster_shape = cluster_shape self.kernel_schedule = kernel_schedule self.epilogue_schedule = epilogue_schedule self.tile_scheduler = tile_scheduler self.stages = stages self.math_instruction = math_instruction self.instruction_shape = math_instruction.instruction_shape # Number of warps along x, y, z directions self.warp_count = warp_count def clone_and_update(self, td: dict): attrs = { "cluster_shape": None, "threadblock_shape": None, "warp_count": None, "stages": None, "instruction_shape": None, "kernel_schedule": None, "epilogue_schedule": None, "tile_scheduler": None } for key in attrs.keys(): if key in td.keys(): attrs[key] = td[key] else: attrs[key] = getattr(self, key) attrs["math_instruction"] = MathInstruction( attrs["instruction_shape"], self.math_instruction.element_a, self.math_instruction.element_b, self.math_instruction.element_accumulator, self.math_instruction.opcode_class, self.math_instruction.math_operation ) # Remove the instruction shape del attrs["instruction_shape"] return TileDescription(**attrs) @property def num_threads(self): """ Returns the number of threads in the threadblock :return: number of threads in the threadblock :rtype: int or None (if warp count is None) """ if self.warp_count is not None: threads = 32 for cnt in self.warp_count: threads *= cnt return threads return None def procedural_name(self): """ Returns a name identifying the tile description :return: name identifying the tile description :rtype: int """ emit_stages = 0 if self.stages is None else self.stages name = "%dx%dx%d_%dx%d_%dx%d" % ( self.cluster_shape[0], self.cluster_shape[1], self.cluster_shape[2], self.threadblock_shape[0], self.threadblock_shape[1], self.threadblock_shape[2], emit_stages ) return name def procedural_name_2x(self): """ Returns a name identifying the tile description :return: name identifying the tile description :rtype: int """ return "%dx%d_%dx%d" % (self.threadblock_shape[0], self.threadblock_shape[1], self.threadblock_shape[2], self.stages) def __str__(self): """ Returns a string with containing each of the tile description's values :return: contents of tile description :rtype: str """ if self.kernel_schedule is not None: kschedule = self.kernel_schedule else: kschedule = KernelScheduleType.ScheduleAuto if 
self.epilogue_schedule is not None: eschedule = self.epilogue_schedule else: eschedule = EpilogueScheduleType.ScheduleAuto if self.tile_scheduler is not None: tschedule = self.tile_scheduler.name else: tschedule = "None" return f""" {{ ClusterShape: {self.cluster_shape} ThreadblockShape: {self.threadblock_shape} WarpCount: {self.warp_count} Stages: {self.stages if self.stages is not None else 'Auto'} InstructionShape: {self.math_instruction.instruction_shape} Kernel schedule: {kschedule.name} Epilogue schedule: {eschedule.name} TileScheduler: {tschedule} }}""" class TensorDescription: def __init__(self, element, layout, alignment=1, complex_transform=ComplexTransform.none): self.element = element self.layout = layout if element != DataType.void: self.alignment = min(128 // DataTypeSize[self.element], alignment) else: self.alignment = alignment self.complex_transform = complex_transform def CalculateSmemUsagePerStage(operation): """ Returns the amount of shared memory in bytes consumed in a single stage of a kernel. :param op: operation for which the per-stage shared memory usage should be computed. If stages are set via the `op.tile_description.stages` parameter, this setting is ignored in the present calculation :type op: cutlass.backend.Operation :return: number of bytes of shared memory consumed by a single stage :rtype: int """ m, n, k = operation.tile_description.threadblock_shape if operation.operation_kind == OperationKind.Gemm: stage_barrier_bytes = 32 return ( (DataTypeSize[operation.A.element] * m * k // 8) + (DataTypeSize[operation.B.element] * k * n // 8) + stage_barrier_bytes ) else: raise Exception("Unsupported operation kind {}.".format(operation.operation_kind)) def CalculateSmemUsage(operation): """ Returns the amount of shared memory in bytes consumed by a kernel. :param op: operation for which the total shared memory usage should be computed :type op: cutlass.backend.Operation :return: number of bytes of shared memory consumed by the kernel across all stages :rtype: int """ return operation.tile_description.stages * CalculateSmemUsagePerStage(operation) class ApiVersion(enum.Enum): """ Differentiate between CUTLASS 2.x and 3.x API versions """ v2x = enum_auto() v3x = enum_auto() def api_version(arch, opclass, dtype): """ Returns whether the architecture, opcode class, and datatype in question require using CUTLASS 2.x or 3.x for code emission. :param arch: compute capability of device on which to run :type arch: int :param opclass: class of the operation being performed :type opclass: cutlass_library.OpcodeClass :param dtype: data type to be used in operation (assumes that ElementA and ElementB are the same) :type dtype: cutlass_library.DataType :return: API version to be used in code emission :rtype: ApiVersion """ if (arch >= 90 and opclass == OpcodeClass.TensorOp and (dtype != DataType.f64)): return ApiVersion.v3x else: return ApiVersion.v2x class EmissionType(enum.Enum): """ Tags for whether to emit a kernel- or device-level operation """ Kernel = enum_auto() Device = enum_auto()
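# -------------------------------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The instruction shape, tile shape,
# and data types below are assumptions chosen only to show how MathInstruction and TileDescription
# compose; real configurations come from the generated kernel manifest / profiler operations.
if __name__ == "__main__":
    _example_instruction = MathInstruction(
        instruction_shape=[16, 8, 16],
        element_a=DataType.f16,
        element_b=DataType.f16,
        element_accumulator=DataType.f32,
        opcode_class=OpcodeClass.TensorOp,
        math_operation=MathOperation.multiply_add,
    )
    _example_td = TileDescription(
        threadblock_shape=[128, 128, 32],
        stages=3,
        warp_count=[2, 2, 1],
        math_instruction=_example_instruction,
    )
    assert _example_td.num_threads == 128   # 2 * 2 * 1 warps of 32 threads each
    print(_example_td.procedural_name())    # e.g. "1x1x1_128x128_32x3"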
cutlass/python/cutlass/backend/library.py/0
{ "file_path": "cutlass/python/cutlass/backend/library.py", "repo_id": "cutlass", "token_count": 6615 }
41
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Ease-of-use interface for constructing, compiling, and running GEMMs. The ``Gemm`` interface is meant to allow one to easily instantiate, compile, and run GEMM operations in CUTLASS via Python, without specifying many configuration parameters. Under the hood, the interface will select sensible default parameters for the many template parameters for CUTLASS GEMMs. Note: optimal performance is not to be expected from this interface. To achieve optimal performance, one should specify and tune each configuration parameter. The simplest example of using this interface is the following: .. highlight:: python .. code-block:: python # A, B, C, and D are torch/numpy/cupy tensor objects plan = cutlass.op.Gemm(A, B, C, D) plan.run() One can also use the interface by specifying data types of operands at construction and using different tensor objects with these data types at runtime: .. highlight:: python .. code-block:: python # The following is shorthand for: # cutlass.op.Gemm(element_A=torch.float32, element_B=torch.float32, # element_C=torch.float32, element_D=torch.float32, # element_accumulator=torch.float32, # layout=cutlass.LayoutType.RowMajor) plan = cutlass.op.Gemm(element=torch.float32, layout=cutlass.LayoutType.RowMajor) A0 = torch.rand((128, 256), device='cuda') B0 = torch.rand((256, 64), device='cuda') C0 = torch.zeros((128, 64), device='cuda') D0 = torch.zeros((128, 64), device='cuda') plan.run(A0, B0, C0, D0) A1 = torch.rand((32, 128), device='cuda') B1 = torch.rand((128, 256), device='cuda') C1 = torch.zeros((32, 256), device='cuda') D1 = torch.zeros((32, 256), device='cuda') plan.run(A1, B1, C1, D1) The interface additionally enables one to decouple the compilation of the underlying CUTLASS kernel from its execution: ..
highlight:: python .. code-block:: python plan = cutlass.op.Gemm(element=np.float32, layout=cutlass.LayoutType.RowMajor) plan.compile() # Do other work... plan.run(A0, B0, C0, D0) # Do other work... plan.run(A1, B1, C1, D1) Elementwise activation functions are easily fused to the GEMM via the interface: .. highlight:: python .. code-block:: python plan = cutlass.op.Gemm(element=np.float32, layout=cutlass.LayoutType.RowMajor) plan.activation = cutlass.epilogue.relu Operations can also be run asynchronously: .. highlight:: python .. code-block:: python plan = cutlass.op.Gemm(element=np.float32, layout=cutlass.LayoutType.RowMajor) args = plan.run() # Do other work... args.sync() """ from math import prod from cuda import cuda from cutlass_library import ( DataType, DataTypeSize, GemmUniversalMode, ) import cutlass from cutlass import epilogue, swizzle from cutlass.backend import compiler from cutlass.backend.evt import EpilogueFunctorVisitor from cutlass.backend.gemm_operation import GemmArguments, GemmOperationUniversal from cutlass.backend.library import TensorDescription, TileDescription from cutlass.op.op import OperationBase from cutlass.shape import GemmCoord from cutlass.utils import check, datatypes class Gemm(OperationBase): """ Constructs a ``Gemm`` object. The data types and layouts of operands A, B, and C, along with the data type of output D and that used for accumulation, are bound to the ``Gemm`` object throughout its lifetime -- these are not to be changed after a ``Gemm`` has been constructed. The constructor has optional parameters for flexibly setting these parameters. The following constructors are equivalent: .. highlight:: python .. code-block:: python # Use F32 for A, B, C, D, and accumulation. All operands are row major. # Use the generic ``element`` and ``layout`` parameters to concisely set all data types and layouts # for operands to the same values. Gemm(element=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor) # Explicitly specify the data types to use for A, B, C, and D. Use the generic ``layout``. Gemm(element_A=cutlass.DataType.f32, element_B=cutlass.DataType.f32, element_C=cutlass.DataType.f32, element_D=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor) # Set the data types and elements from existing tensors. Note that one can use different tensors when # executing GEMM via the ``run()`` method than passed in here (though those passed in to ``run()`` must # have the same data type and layout as those passed in here). # A, B, C, and D are row-major torch.Tensor objects of type torch.float32 Gemm(A=A, B=B, C=C, D=D) # Use the generic ``element`` and explicitly specify the layouts to use for A, B, and C (layout of D is # the same as that for D, at present) Gemm(element=cutlass.DataType.f32, layout_A=cutlass.LayoutType.RowMajor, layout_B=cutlass.LayoutType.RowMajor, layout_C=cutlass.LayoutType.RowMajor) # Explicitly specify the data type and layout for only some of A, B, C, and D. 
Unspecified data types # and layouts will inherit those passed in via the generic ``element`` and ``layout`` Gemm(element_A=cutlass.DataType.f32, layout_B=cutlass.LayoutType.RowMajor, element=cutlass.DataType.f32, layout=cutlass.LayoutType.RowMajor) The order of precedence for the setting of the data type and layout for a given operand/output is as follows: 1) If the tensor type is specified (e.g., ``A``), use the data type and layout inferred from this tensor 2) Otherwise, if the data type/layout (e.g., ``element_A``, ``layout_A``) is specified, use those 3) Otherwise, use the generic values (e.g., ``element``, ``layout``) :param cc: compute capability of device for which kernels should be compiled. For example, if running on H100, this should be set to 90 :type cc: int :param kernel_cc: compute capability of kernels to generate. For example, if running on SM90, but desiring to use a CUTLASS 2.x-style Ampere kernel, this should be set to 80 :type kernel_cc: int :param A: tensor representing data type and layout of operand A :param B: tensor representing data type and layout of operand B :param C: tensor representing data type and layout of operand C :param D: tensor representing data type and layout of operand D :param alpha: scalar paramter alpha from GEMM computation that scales the product of operands A and B :param beta: scalar parameter beta from GEMM operation that scales operand C :param element_accumulator: data type to be used in accumulation of the product of operands A and B :type element_accumulator: cutlass.DataType :param element: generic data type to be used for operands A, B, C, D, as well as the accumulation data type :type element: cutlass.DataType :param layout: generic layout type to be used for operands A, B, C, and D :type layout: cutlass.LayoutType :param element_A: data type to be used for operand A :type element_A: cutlass.DataType :param element_B: data type to be used for operand B :type element_B: cutlass.DataType :param element_C: data type to be used for operand C :type element_C: cutlass.DataType :param element_D: data type to be used for operand D :type element_D: cutlass.DataType :param layout_A: layout of operand A :type layout_A: cutlass.LayoutType :param layout_B: layout of operand B :type layout_B: cutlass.LayoutType :param layout_C: layout of operand C :type layout_C: cutlass.LayoutType :param layout_D: layout of operand D :type layout_D: cutlass.LayoutType """ def __init__( self, A=None, B=None, C=None, D=None, alpha=1.0, beta=0.0, element_accumulator=None, element=None, layout=None, element_A=None, element_B=None, element_C=None, element_D=None, layout_A=None, layout_B=None, layout_C=None, cc: int = None, kernel_cc: int = None ): super().__init__(cc=cc, kernel_cc=kernel_cc) self.name = "gemm" self.compiled = False elements = [] layouts = [] # Check that at least one of the following is set for each tensor (illustrated assuming tensor A): # ``A``, ``element_A``, ``element`` and ``A``, ``layout_A``, ``layout`` for elt, lay, tens, name in zip([element_A, element_B, element_C, element_D], [layout_A, layout_B, layout_C, layout_C], [A, B, C, D], ["A", "B", "C", "D"]): if elt is not None and tens is not None: raise Exception(f'Must not specify both element_{name} and tensor {name}') if lay is not None and tens is not None: raise Exception(f'Must not specify both layout_{name} and tensor {name}') if elt is None and tens is None and element is None: raise Exception(f'Must specify one of element_{name}, tensor {name}, or generic element.') if lay is None and 
tens is None and layout is None: raise Exception(f'Must specify one of layout_{name}, tensor {name}, or generic layout.') elt_to_set = None lay_to_set = None if tens is not None: elt_to_set, lay_to_set = datatypes.get_datatype_and_layout(tens) else: elt_to_set = elt if elt is not None else element lay_to_set = lay if lay is not None else layout elements.append(datatypes.library_type(elt_to_set)) layouts.append(lay_to_set) self._element_a, self._element_b, self._element_c, self._element_d = elements self._layout_a, self._layout_b, self._layout_c, self._layout_d = layouts if element_accumulator is None: self._element_accumulator = self._element_c else: self._element_accumulator = datatypes.library_type(element_accumulator) self.A = A self.B = B self.C = C self.D = D self.alpha = alpha self.beta = beta self.epilogue_functor = None self.op_class = None self._tile_description = None self._reset_operations() self._swizzling_functor = cutlass.swizzle.IdentitySwizzle1 def _reset_operations(self, reset_epilogue: bool = True): # Set the default op class datatype_comb = (self._element_a, self._element_b, self._element_accumulator) layout_comb = (self._layout_a, self._layout_b) self.possible_op_classes = self.options.supporting_opclasses( self._element_a, self._element_b, self._element_accumulator, self._layout_a, self._layout_b, self._math_operation) if cutlass.OpcodeClass.TensorOp in self.possible_op_classes: self.opclass = cutlass.OpcodeClass.TensorOp elif cutlass.OpcodeClass.Simt in self.possible_op_classes: self.opclass = cutlass.OpcodeClass.Simt else: if self._math_operation is not None: math_op_str = f' and math operation {self._math_operation}' else: math_op_str = '' raise Exception(f'No kernel configuration found for supported data type and layout ' f'combination {datatype_comb}x{layout_comb}{math_op_str}') if reset_epilogue: self._reset_epilogue_functor_activation(cutlass.epilogue.identity) @property def swizzling_functor(self): """ Returns the type of the swizzling functor currently being used by the GEMM :return: swizzing functor type """ return self._swizzling_functor @swizzling_functor.setter def swizzling_functor(self, swizzling_functor): """ Sets the swizzling functor to the type specified by `swizzling_functor` """ if swizzling_functor == cutlass.swizzle.ThreadblockSwizzleStreamK: if self.op_class == cutlass.OpcodeClass.Simt: raise Exception('ThreadblockSwizzleStreamK is currently only supported with opcode class TensorOp') if self.current_cc == 90: raise Exception('ThreadblockSwizzleStreamK is currently unsupported on SM90') self._swizzling_functor = swizzling_functor # # Tile description Related # @property def tile_description(self) -> TileDescription: """ Returns the tile description """ return self._tile_description @tile_description.setter def tile_description( self, td=None): """ Set the tile description :param td: tile description :type td: cutlass.backend.TileDescription, or a dict with keys { "threadblock_shape": [int, int, int], "warp_count": [int, int, int], "stages": int, "instruction_shape": [int, int, int] (optional), "cluster_shape": [int, int, int] (optional) } """ if td is None: return if isinstance(td, dict): if self._tile_description is None: op = self.possible_operations.default_operation(self._math_operation) self._tile_description = datatypes.td_from_profiler_op(op) td = self._tile_description.clone_and_update(td) valid, msg = self._valid_tile_description(td) if valid: self._tile_description = td else: raise Exception(msg) def _valid_tile_description(self, td: 
TileDescription) -> tuple: """ Checks whether the provided tile description is valid for the given compute capability. At present, this checks the following: - Does the tile description use a number of stages supported by the compute capability in question? - Does the tile size requested fit within shared memory? - Are cluster dimensions outside the valid range requested for a given architecture (e.g., more non-unit cluster dimensions for pre-SM90 architectures)? - Is the kernel schedule being used supported on the architecture in question? :param td: tile description to validate :type td: cutlass.backend.TileDescription :return: tuple in which the first element is a bool indicating that the tile description is valid and the second element is a string providing an optional error message. :rtype: tuple """ valid, msg = check.valid_stage_count(self.cc, self.current_cc, td, self._element_c, self._element_d) if not valid: return (valid, msg) valid, msg = check.valid_cluster_shape(self.current_cc, td.cluster_shape) if not valid: return (valid, msg) valid, msg = check.valid_schedule(self.current_cc, td.kernel_schedule, td.epilogue_schedule, td.tile_scheduler) return valid, msg def tile_descriptions(self) -> list: """ Returns a list of valid tile descriptions for the operations :returns: list of valid tile descriptions for the operations :rtype: list """ tds = [datatypes.td_from_profiler_op(op) for op in self.possible_operations.all_operations] if self._math_operation is not None: tds = [td for td in tds if td.math_instruction.math_operation == self._math_operation] return tds def construct( self, tile_description: TileDescription = None, alignment_A: int = None, alignment_B: int = None, alignment_C: int = None) -> GemmOperationUniversal: """ Constructs a ``cutlass.backend.GemmUniversalOperation`` based on the input parameters and current kernel specification of the ``Gemm`` object. :param tile_description: tile description specifying shapes and operand types to use in the kernel :type tile_description: cutlass.backend.TileDescription :param alignment_A: alignment of operand A :type alignment_A: int :param alignment_B: alignment of operand B :type alignment_B: int :param alignment_C: alignment of operand C :type alignment_C: int :return: operation that was constructed :rtype: cutlass.backend.GemmOperationUniversal """ alignment_pref_A = min(128 // DataTypeSize[self._element_a], max(self.possible_operations.alignments("A"))) alignment_pref_B = min(128 // DataTypeSize[self._element_b], max(self.possible_operations.alignments("B"))) alignment_A = check.alignment_or_default(alignment_A, alignment_pref_A) alignment_B = check.alignment_or_default(alignment_B, alignment_pref_B) tensor_A = TensorDescription(self._element_a, self._layout_a, alignment_A) tensor_B = TensorDescription(self._element_b, self._layout_b, alignment_B) if alignment_C is None: alignment_C = max(self.possible_operations.alignments("C")) if self._element_c != DataType.void: alignment_C = min(128 // DataTypeSize[self._element_c], alignment_C) if tile_description is None: if self._tile_description is None: op = self.possible_operations.operations(alignment_A, alignment_B, alignment_C, self._math_operation)[0] tile_description = datatypes.td_from_profiler_op(op) # The selected op may have lower alignment than that determined above, so we must # reset alignment here. 
alignment_C = op.C.alignment else: tile_description = self._tile_description else: valid, err_str = self._valid_tile_description(tile_description) if not valid: raise Exception(f"Invalid tile description. {err_str}") self._tile_description = tile_description tensor_C = TensorDescription(self._element_c, self._layout_c, alignment_C) self.epilogue_functor = self._reset_epilogue_functor_alignment(alignment_C, self.epilogue_functor) operation = GemmOperationUniversal( arch=self.current_cc, tile_description=tile_description, A=tensor_A, B=tensor_B, C=tensor_C, epilogue_functor=self.epilogue_functor, swizzling_functor=self._swizzling_functor, ) return operation def compile(self, tile_description: TileDescription = None, alignment_A: int = None, alignment_B: int = None, alignment_C: int = None, print_module: bool = False) -> cutlass.backend.GemmOperationUniversal: """ Emits and compiles the kernel currently specified. If ``tile_description`` and any of the ``alignment`` parameters are set, the kernel will be chosen using this tile description and alignments. Otherwise, a default tile description and alignment will be used. :param tile_description: tile description specifying shapes and operand types to use in the kernel :type tile_description: cutlass.backend.TileDescription :param alignment_A: alignment of operand A :type alignment_A: int :param alignment_B: alignment of operand B :type alignment_B: int :param alignment_C: alignment of operand C :type alignment_C: int :param print_module: whether to print the emitted C++ code :type print_module: bool :return: operation that was compiled :rtype: cutlass.backend.GemmOperationUniversal """ self.operation = self.construct(tile_description, alignment_A, alignment_B, alignment_C) if print_module: print(self.operation.rt_module.emit()) compiler.add_module([self.operation,]) return self.operation def _verify_rank(self, tensor): """ Verifies that ``tensor`` has rank greater than 1 :param tensor: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in :type tensor: numpy/cupy/torch array/tensor object """ if len(tensor.shape) < 2: raise Exception(f"Tensors must be of rank greater than 1. Received tensor of shape: {tensor.shape}") def _get_batch_count(self, A, B, C, D) -> int: """ Returns the batch count specified by the tensors A, B, C, and D and verifies that these tensors match in batch size. Presence of a batch dimension is detected by one of the tensors being rank 3. If a batch dimension is present, it must be present in one of operands A, B, or C (but need not be in all), and must be present in D. :param A: tensor A :type A: numpy/cupy/torch array/tensor object :param B: tensor B :type B: numpy/cupy/torch array/tensor object :param C: tensor C :type C: numpy/cupy/torch array/tensor object :param D: tensor D :type D: numpy/cupy/torch array/tensor object :return: tuple of batch count dimensions :rtype: tuple """ A_batch = prod(A.shape[:-2]) if len(A.shape) > 2 else 1 B_batch = prod(B.shape[:-2]) if len(B.shape) > 2 else 1 if 1 not in [A_batch, B_batch]: if A_batch != B_batch: raise Exception(f"Get invalid batch counts: A={A_batch}, B={B_batch}") return max(A_batch, B_batch) def _get_batch_stride(self, tensor) -> int: """ Returns the batch stride of ``tensor``. If ``tensor`` is only rank-2, batch stride is 0. 
:param tensor: tensor object to process :type tensor: numpy/cupy/torch array/tensor object :return: stride between each matrix in the batch :rtype: int """ if tensor is not None and len(tensor.shape) > 2: return tensor.shape[-2] * tensor.shape[-1] else: return 0 def _get_problem_args(self, A, B, C, D) -> tuple: """ Returns the problem size and GEMM universal mode to use for the given operands. :param A: tensor A :type A: numpy/cupy/torch array/tensor object :param B: tensor B :type B: numpy/cupy/torch array/tensor object :param C: tensor C :type C: numpy/cupy/torch array/tensor object :param D: tensor D :type D: numpy/cupy/torch array/tensor object :return: tuple containing the problem size (cutlass.shape.GemmCoord), the GEMM mode (cutlass.GemmUniversalMode), and the batch count (int) :rtype: tuple """ M, K = A.shape[-2:] N = B.shape[-1] mode = GemmUniversalMode.Gemm batch_count = self._get_batch_count(A, B, C, D) returned_batch_count = batch_count # If we are running a batched GEMM in which there is a nonzero batch stride # only for A, then we can fold the batched dimension of A into the M dimension # (i.e., (b, m, k) x (k, n) -> (m*b, k) x (k, n)). This works only if both A # and C are row major. A similar operation can be performed if only B has a nonzero # batch dimension if batch_count > 1: A_row = self._layout_a == cutlass.LayoutType.RowMajor B_row = self._layout_b == cutlass.LayoutType.RowMajor C_row = self._layout_c == cutlass.LayoutType.RowMajor # Consider a Tensor to be batched if its rank is > 2 and # the product of the modes beyond rank 2 equals our pre-determined batch size. batched = lambda x : x is None or (len(x.shape) > 2 and prod(x.shape[:-2]) == batch_count) if batched(A) and not batched(B) and (C is None or batched(C)) and A_row and C_row: M *= batch_count returned_batch_count = 1 elif not batched(A) and batched(B) and (C is None or batched(C)) and not B_row and not C_row: N *= batch_count returned_batch_count = 1 else: mode = GemmUniversalMode.Batched return GemmCoord(M, N, K), mode, returned_batch_count def _verify_type_and_layout(self, tensor, ref_type, ref_layout, name): """ Verifies that ``tensor`` has data type ``ref_type`` and layout ``ref_layout``. An exception is raised if it does not. :param tensor: object representing a tensor passed in to verify, or ``None`` if no tensor was passed in :type tensor: numpy/cupy/torch array/tensor object :param ref_dtype: data type for the tensor that this object was initialized to :param ref_layout: layout for the tensor that this object was initialized to :param name: identifier of the tensor to verify. Used in raising exceptions :type name: str """ dtype, layout = datatypes.get_datatype_and_layout(tensor) if dtype != ref_type or layout != ref_layout: try: # Attempt to transpose the tensor to fit the desired layout tensor = tensor.transpose(-1, -2) except: raise Exception(f'Tensor {name} with type and layout ({dtype}, {layout}) ' f'does not match the expected type and ' f'layout of ({ref_type}, {ref_layout}) and transpose failed.') def run(self, A=None, B=None, C=None, D=None, alpha=None, beta=None, sync: bool = True, print_module: bool = False, visitor_args: dict = None, stream: cuda.CUstream = cuda.CUstream(0)) -> GemmArguments: """ Runs the kernel currently specified. If it has not already been, the kernel is emitted and compiled. 
Tensors holding operands and outputs of the kernel are sourced either from the ``A``, ``B``, ``C``, ``D``, ``alpha``, and ``beta`` parameters provided in this call, or from those passed in on the construction of this object -- one of the two must be specified. By default, this call returns only once the kernel has completed. To launch the kernel and immediately return, set ``sync=False``. In this case, it is the responsibility of the caller to syncrhonize the results of the kernel before attempting to access outputs by calling ``sync()`` on the arguments returned from this call. :param A: tensor representing data type and layout of operand A :param B: tensor representing data type and layout of operand B :param C: tensor representing data type and layout of operand C :param D: tensor representing data type and layout of operand D :param alpha: scalar paramter alpha from GEMM computation that scales the product of operands A and B :param beta: scalar parameter beta from GEMM operation that scales operand C :param sync: whether the call should wait for the kernel to complete before returning :type sync: bool :param print_module: whether to print the emitted C++ code :type print_module: bool :param stream: cuda stream, defaults to cuda.cuda.CUstream(0) :type stream: :class:`cuda.cuda.CUstream` :return: arguments passed in to the kernel :rtype: cutlass.backend.GemmArguments """ super().run_setup() A = self._verify_tensor(A, self.A, self._element_a, self._layout_a, "A") B = self._verify_tensor(B, self.B, self._element_b, self._layout_b, "B") C = self._verify_tensor(C, self.C, self._element_c, self._layout_c, "C") D = self._verify_tensor(D, self.D, self._element_d, self._layout_d, "D") alpha = self._verify_scalar(alpha, self.alpha, self._element_c, "alpha") beta = self._verify_scalar(beta, self.beta, self._element_c, "beta") is_void_c = self._element_c == DataType.void self._verify_rank(A) self._verify_rank(B) if not is_void_c: self._verify_rank(C) self._verify_rank(D) alignment_a = self.possible_operations.find_alignment(A.shape, self._layout_a, operand="A") alignment_b = self.possible_operations.find_alignment(B.shape, self._layout_b, operand="B") # Set C alignment based on D.shape so as to correctly get an alignment with void-C # kernels, for which `C` is None. alignment_c = self.possible_operations.find_alignment(D.shape, self._layout_c, operand="C") self.compile(self._tile_description, alignment_A=alignment_a, alignment_B=alignment_b, alignment_C=alignment_c, print_module=print_module) problem_size, mode, batch_count = self._get_problem_args(A, B, C, D) if mode == GemmUniversalMode.Gemm or batch_count == 1: kwargs = {'split_k_slices': 1} else: kwargs = { 'batch': batch_count, 'batch_strides': { 'A': self._get_batch_stride(A), 'B': self._get_batch_stride(B), 'C': self._get_batch_stride(C), 'D': self._get_batch_stride(D) } } kwargs['stream'] = stream if isinstance(self.epilogue_functor, EpilogueFunctorVisitor): output_op = self.operation.epilogue_type(visitor_args) else: output_op = self.operation.epilogue_type(alpha, beta) arguments = GemmArguments( operation=self.operation, problem_size=problem_size, A=A, B=B, C=C, D=D, output_op=output_op, gemm_mode=mode, **kwargs ) self.operation.run(arguments) if sync: arguments.sync() return arguments
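# -------------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a batched invocation as resolved by
# _get_problem_args above. Shapes and dtypes are arbitrary assumptions; rank-3 operands that share
# a leading batch dimension resolve to GemmUniversalMode.Batched with batch_count == 8, unless the
# batch can be folded into the M or N dimension.
if __name__ == "__main__":
    import torch
    plan = Gemm(element=torch.float32, layout=cutlass.LayoutType.RowMajor)
    A = torch.rand((8, 64, 32), device="cuda")
    B = torch.rand((8, 32, 128), device="cuda")
    C = torch.zeros((8, 64, 128), device="cuda")
    D = torch.zeros_like(C)
    plan.run(A, B, C, D)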
cutlass/python/cutlass/op/gemm.py
################################################################################################# # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Utilities for filtering CUTLASS library kernels and emitting library intitialization and building code """ import enum import logging import os.path import shutil try: import builtins if hasattr(builtins, "CUTLASS_IGNORE_PACKAGE") and CUTLASS_IGNORE_PACKAGE == True: raise ImportError("Disabling attempt to import cutlass_library") from cutlass_library.library import * from cutlass_library.gemm_operation import * from cutlass_library.rank_k_operation import * from cutlass_library.rank_2k_operation import * from cutlass_library.trmm_operation import * from cutlass_library.symm_operation import * from cutlass_library.conv2d_operation import * from cutlass_library.conv3d_operation import * except ImportError: from library import * from gemm_operation import * from rank_k_operation import * from rank_2k_operation import * from trmm_operation import * from symm_operation import * from conv2d_operation import * from conv3d_operation import * ################################################################################################### _LOGGER = logging.getLogger(__name__) class EmitOperationKindAll: """ Emit the OperationKind-level CUTLASS library initialization code. The code is generated in the {generated_path}/{operation_kind} directory (e.g., tools/library/generated/gemm in the build directory, for OperationKind=Gemm), in the all_{operation_kind}_operations.cu file (e.g., all_gemm_operations.cu for OperationKind=Gemm). That file declares several functions in namespace cutlass::library. The functions all have this form, void initialize_{configuration_name}(Manifest& manifest); The file also _defines_ the following function in that namespace. 
void initialize_all_{operation_kind}_operations(Manifest& manifest); That function calls all of the functions declared in this file. Those functions are defined in subdirectories (which this class does not create). """ def __init__(self, generated_path, kind, args): self.generated_path = generated_path self.kind = kind self.args = args self.header_template =""" /* Generated by manifest.py - Do not edit. */ #include "cutlass/cutlass.h" #include "cutlass/library/library.h" #include "cutlass/library/manifest.h" namespace cutlass { namespace library { /////////////////////////////////////////////////////////////////////////////////////////////////// """ self.entry_template = """ // // Entry point to construct operations // void initialize_all_${operation_name}_operations(Manifest &manifest) { """ self.configuration_prototype_template = "void initialize_${configuration_name}(Manifest &manifest);\n" self.configuration_template =" initialize_${configuration_name}(manifest);\n" self.epilogue_template ="""} /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace library } // namespace cutlass """ # def __enter__(self): _LOGGER.debug("*** EmitOperationKindAll::__enter__") self.operation_path = os.path.join(self.generated_path, OperationKindNames[self.kind]) _LOGGER.debug('*** operation_path (directory to create): ' + str(self.operation_path)); os.makedirs(self.operation_path, exist_ok=True) self.top_level_path = os.path.join(self.operation_path, f"all_{OperationKindNames[self.kind]}_operations.cu") _LOGGER.debug(f"*** top_level_path (file to write): {str(self.top_level_path)}") self.top_level_file = open(self.top_level_path, "w") self.top_level_file.write(self.header_template) self.source_files = [self.top_level_path,] self.configurations = [] return self # def emit(self, operations): _LOGGER.debug('*** EmitOperationKindAll::emit') _LOGGER.debug(f"*** len(operations): {len(operations)}") _LOGGER.debug(f"*** min_cc list: {sorted(min_cc for min_cc, _ in operations.items())}") for min_cc, configurations in sorted(operations.items()): _LOGGER.debug(f"*** min_cc={min_cc}") for configuration_name, _ in configurations.items(): _LOGGER.debug(f"*** configuration_name={configuration_name}") self.configurations.append(configuration_name) self.top_level_file.write(SubstituteTemplate(self.configuration_prototype_template, {'configuration_name': configuration_name} )) # def __exit__(self, exception_type, exception_value, traceback): _LOGGER.debug("*** EmitOperationKindAll::__exit__") self.top_level_file.write(SubstituteTemplate(self.entry_template, {'operation_name': OperationKindNames[self.kind]})) for configuration_name in self.configurations: self.top_level_file.write(SubstituteTemplate(self.configuration_template, {'configuration_name': configuration_name})) self.top_level_file.write(self.epilogue_template) self.top_level_file.close() class EmitOperationKindLibrary: """ Emit the CUTLASS library initialization code for each OperationKind. The code is generated in the directory {generated_path}/{operation_kind}/{min_cc} (e.g., tools/library/generated/gemm/90 in the build directory, for min_cc=90 and OperationKind=Gemm), in the file all_sm{min_cc}_{operation_kind}_operations.cu (e.g., all_sm90_gemm_operations.cu for min_cc=90 and OperationKind=Gemm). The min_cc variable here indicates the minimum GPU architecture version that the things to be initialized require. For example, min_cc=90 indicates sm90. 
That file declares several functions in namespace cutlass::library. The functions all have this form, void initialize_all_sm{min_cc}_{subclass_name}_{extended_name}_operations(Manifest& manifest); where extended_name is operation.extended_name() for all the operations given to the emit method (which see below). (All operations for a given configuration_name are guaranteed to have the same extended_name().) The file also _defines_ the following function in that namespace. void initialize_all_sm{min_cc}__{operation_kind}_operations(Manifest& manifest); That function calls all of the functions declared in this file. Those functions are defined in subdirectories. The mapping from OperationKind to emitter handles the details of what happens in each of those subdirectories. """ def __init__(self, generated_path, min_cc, kind, args): self.generated_path = generated_path self.min_cc = min_cc self.kind = kind self.args = args self.emitters = { OperationKind.Gemm: EmitGemmConfigurationLibrary, OperationKind.Conv2d: EmitConv2dConfigurationLibrary, OperationKind.Conv3d: EmitConv3dConfigurationLibrary, OperationKind.RankK: EmitRankKConfigurationLibrary, OperationKind.Rank2K: EmitRank2KConfigurationLibrary, OperationKind.Trmm: EmitTrmmConfigurationLibrary, OperationKind.Symm: EmitSymmConfigurationLibrary } self.header_template =""" /* Generated by manifest.py - Do not edit. */ #include "cutlass/cutlass.h" #include "cutlass/library/library.h" #include "cutlass/library/manifest.h" namespace cutlass { namespace library { /////////////////////////////////////////////////////////////////////////////////////////////////// """ self.entry_template = """ // // Entry point to construct operations // void initialize_all_sm${min_cc}_${subclass_name}_${operation_name}_operations(Manifest &manifest) { """ self.configuration_prototype_template = "void initialize_${configuration_name}(Manifest &manifest);\n" self.configuration_template = " initialize_${configuration_name}(manifest);\n" self.subclass_call_template = " initialize_all_sm${min_cc}_${subclass_name}_${operation_name}_operations(manifest);\n" self.subclass_prototype_template = "void initialize_all_sm${min_cc}_${subclass_name}_${operation_name}_operations(Manifest &manifest);\n" self.epilogue_template ="""} /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace library } // namespace cutlass """ # def __enter__(self): _LOGGER.debug("*** EmitOperationKindLibrary::__enter__") _LOGGER.debug(f"*** generated_path: {str(self.generated_path)}") _LOGGER.debug(f"*** OperationKindNames[kind]: {OperationKindNames[self.kind]}") _LOGGER.debug(f"*** min_cc: {self.min_cc}") self.operation_path = os.path.join(self.generated_path, OperationKindNames[self.kind], str(self.min_cc)) _LOGGER.debug(f"*** operation_path (directory to make): {str(self.operation_path)}") os.makedirs(self.operation_path) self.top_level_path = os.path.join(self.operation_path, f"all_sm{self.min_cc}_{OperationKindNames[self.kind]}_operations.cu") _LOGGER.debug(f"*** top_level_path (file to write): {str(self.top_level_path)}") self.top_level_file = open(self.top_level_path, "w") self.top_level_file.write(self.header_template) self.source_files = {} # Each {operation_kind x cc} combination is further decomposed by the instruction # types used. 
This dictionary used to track the file handles for the top-level # files of each subclass self.subclass_files = {} # Configurations in each sub class self.subclass_configurations = {} return self # def emit(self, configuration_name, operations): _LOGGER.debug("*** EmitOperationKindLibrary::emit") _LOGGER.debug(f"*** configuration_name: {configuration_name}") assert len(operations) > 0 # The extended name for all operations of a given configuration_name is guaranteed # to be the same because extended_name() is used in defining configuration_name. Thus, # we can safely use the extended_name() of the first operation. extended_name = operations[0].extended_name() _LOGGER.debug('*** extended_name (for all ops): ' + extended_name) # Create a directory for operations with this subclass if it does not exist if extended_name not in self.subclass_files: subclass_path = os.path.join(self.operation_path, extended_name) _LOGGER.debug(f"*** subclass_path: {str(subclass_path)}") os.mkdir(subclass_path) self.subclass_configurations[extended_name] = [] # Open a new top-level file for this sub class subclass_top_level_path = os.path.join( subclass_path, f"all_sm{self.min_cc}_{extended_name}_{OperationKindNames[self.kind]}_operations.cu") _LOGGER.debug('*** subclass_top_level_path (min_cc, extended_name, ' + 'OperationKind): ' + str(subclass_top_level_path)) self.subclass_files[extended_name] = open(subclass_top_level_path, "w") self.subclass_files[extended_name].write(self.header_template) self.source_files[extended_name] = [subclass_top_level_path] subclass_dir = os.path.dirname(self.subclass_files[extended_name].name) _LOGGER.debug('*** subclass_dir: ' + str(subclass_dir)) with self.emitters[self.kind](subclass_dir, configuration_name) as configuration_emitter: for operation in operations: configuration_emitter.emit(operation) _LOGGER.debug('*** configuration_emitter.configuration_path: ' + str(configuration_emitter.configuration_path)) self.source_files[extended_name].append(configuration_emitter.configuration_path) self.subclass_configurations[extended_name].append(configuration_name) self.subclass_files[extended_name].write(SubstituteTemplate(self.configuration_prototype_template, {'configuration_name': configuration_name} )) # def __exit__(self, exception_type, exception_value, traceback): _LOGGER.debug("*** EmitOperationKindLibrary::__exit__") for subclass_name, subclass_file in sorted(self.subclass_files.items()): subclass_cfg = { 'min_cc': str(self.min_cc), 'subclass_name': subclass_name, 'operation_name': OperationKindNames[self.kind] } self.top_level_file.write(SubstituteTemplate(self.subclass_prototype_template, subclass_cfg)) self.top_level_file.write( SubstituteTemplate(self.entry_template, { 'min_cc': str(self.min_cc), 'subclass_name': '', 'operation_name': OperationKindNames[self.kind] })) # Finish and close all subclass files for subclass_name, subclass_file in sorted(self.subclass_files.items()): subclass_cfg = { 'min_cc': str(self.min_cc), 'subclass_name': subclass_name, 'operation_name': OperationKindNames[self.kind] } subclass_file.write(SubstituteTemplate(self.entry_template, subclass_cfg)) for configuration in self.subclass_configurations[subclass_name]: subclass_file.write( SubstituteTemplate(self.configuration_template, { 'configuration_name': configuration })) subclass_file.write(self.epilogue_template) subclass_file.close() # Write the call to initialize_all for this subclass to the top-level file self.top_level_file.write(SubstituteTemplate(self.subclass_call_template, 
subclass_cfg)) self.top_level_file.write(self.epilogue_template) self.top_level_file.close() class EmitInterfaceLibrary: """ Emit the topmost-level CUTLASS library initialization code. The code is generated in the generated_path directory (e.g., tools/library/generated in the build directory), in the initialize_all.cpp file. That file declares several functions in namespace cutlass::library. The functions all have this form, void initialize_all_{operation_kind}_operations(Manifest& manifest); where {operation_kind} abbreviates the "kind" of operation (e.g., gemm for matrix-matrix multiply, conv2d for 2-d convolution, or trmm for triangular solve with multiple right-hand sides). The definitions of these functions live in subdirectories. The file also _defines_ the following function in that namespace. void initialize_all(Manifest& manifest); That function first prepares the manifest, and then calls all of the functions declared in this file. """ def __init__(self, generated_path, operation_count, args): self.generated_path = generated_path self.args = args self.prototypes = [] self.fn_calls = [] self.operation_count = str(operation_count) self.top_level_hdr_template = ''' /* Generated by manifest.py - Do not edit. */ ''' self.top_level_prologue = ''' #include "cutlass/library/library.h" #include "cutlass/library/manifest.h" namespace cutlass { \tnamespace library { ${prototypes} ''' self.top_level_initialize_kind = ''' \t\tvoid initialize_all_${kind}_operations(Manifest &manifest) { ${fn_calls} \t\t} ''' self.top_level_initialize = ''' \t\tvoid initialize_all(Manifest &manifest) { \t\t\tmanifest.reserve(${operation_count});\n ${fn_calls} \t\t} ''' self.top_level_suffix = ''' \t} // namespace library } // namespace cutlass ''' # def __enter__(self): _LOGGER.debug("*** EmitInterfaceLibrary::__enter__") self.top_level_path = os.path.join(self.generated_path, 'initialize_all.cpp') _LOGGER.debug("*** top_level_path: " + str(self.top_level_path)) self.top_level_file = open(self.top_level_path, "w") self.top_level_file.write(self.top_level_hdr_template) self.source_files = [self.top_level_path,] return self # def emit(self, operation_name): _LOGGER.debug("*** EmitInterfaceLibrary::emit") _LOGGER.debug("*** operation_name: " + operation_name) self.prototypes.append(SubstituteTemplate( "\t\tvoid initialize_all_${operation_kind}_operations(Manifest &manifest);", {'operation_kind': operation_name})) self.fn_calls.append(SubstituteTemplate( "\t\t\tinitialize_all_${operation_kind}_operations(manifest);", {'operation_kind': operation_name})) # def __exit__(self, exception_type, exception_value, traceback): _LOGGER.debug("*** EmitInterfaceLibrary::__exit__") self.top_level_file.write(SubstituteTemplate(self.top_level_prologue, {'prototypes':"\n".join(self.prototypes)})) # Write out initialize_all method self.top_level_file.write(SubstituteTemplate(self.top_level_initialize, {'operation_count': self.operation_count, 'fn_calls':"\n".join(self.fn_calls)})) self.top_level_file.write(self.top_level_suffix) self.top_level_file.close() ################################################################################################### ################################################################################################### class Options: def __init__(self): pass ################################################################################################### # class Manifest: # def __init__(self, args = None): self.operations = {} self.args = args self.operation_count = 0 self.operations_by_name = {} 
self.kernel_filter = '' self.kernel_filter_list = [] self.kernel_names = [] self.operations_enabled = [] self.selected_kernels = [] self.ignore_kernel_names = [] self.compute_capabilities = [50,] self.curr_build_dir = '.' self.filter_by_cc = True if self.args: self.kernel_filter = self.args.kernels self.curr_build_dir = args.curr_build_dir # A common user error is to use commas instead of semicolons. if ',' in args.architectures: raise RuntimeError("The list of architectures (CMake option CUTLASS_NVCC_ARCHS) must be semicolon-delimited.\nDon't use commas to separate the architectures; use semicolons.\nYou specified the list as: " + args.architectures) architectures = args.architectures.split(';') if len(args.architectures) else ['50',] arch_conditional_cc = ['90a'] architectures = [x if x not in arch_conditional_cc else x.split('a')[0] for x in architectures] self.compute_capabilities = [int(x) for x in architectures] if args.filter_by_cc in ['false', 'False', '0']: self.filter_by_cc = False if args.operations == 'all': self.operations_enabled = [] else: operations_list = [ OperationKind.Gemm , OperationKind.Conv2d , OperationKind.Conv3d , OperationKind.RankK , OperationKind.Trmm , OperationKind.Symm ] self.operations_enabled = [x for x in operations_list if OperationKindNames[x] in args.operations.split(',')] if args.kernels == 'all': self.kernel_names = [] else: self.kernel_names = [x for x in args.kernels.split(',') if x != ''] self.ignore_kernel_names = [x for x in args.ignore_kernels.split(',') if x != ''] if args.kernel_filter_file is None: self.kernel_filter_list = [] else: self.kernel_filter_list = self.get_kernel_filters(args.kernel_filter_file) _LOGGER.debug("Using {filter_count} kernel filters from {filter_file}".format( filter_count = len(self.kernel_filter_list), filter_file = args.kernel_filter_file)) self.operation_count = 0 self.operations_by_name = {} self.disable_full_archs_compilation = args.disable_full_archs_compilation def get_kernel_filters (self, kernelListFile): if os.path.isfile(kernelListFile): with open(kernelListFile, 'r') as fileReader: lines = [line.rstrip() for line in fileReader if not line.startswith("#")] lines = [re.compile(line) for line in lines if line] return lines else: return [] # def filter_out_kernels(self, kernel_name, kernel_filter_list): for kernel_filter_re in kernel_filter_list: if kernel_filter_re.search(kernel_name) is not None: return True return False # def _filter_string_matches(self, filter_string, haystack): ''' Returns true if all substrings appear in the haystack in order''' substrings = filter_string.split('*') for sub in substrings: idx = haystack.find(sub) if idx < 0: return False haystack = haystack[idx + len(sub):] return True # def filter(self, operation): ''' Filtering operations based on various criteria''' # filter based on compute capability enabled = not (self.filter_by_cc) for cc in self.compute_capabilities: if cc >= operation.tile_description.minimum_compute_capability and \ cc <= operation.tile_description.maximum_compute_capability and \ (cc not in SharedMemPerCC or SharedMemPerCC[cc] >= CalculateSmemUsage(operation)): enabled = True break if not enabled: return False if len(self.operations_enabled) and not operation.operation_kind in self.operations_enabled: return False # eliminate duplicates if operation.procedural_name() in self.operations_by_name.keys(): return False # Filter based on list of valid substrings if len(self.kernel_names): name = operation.procedural_name() enabled = False # compare against the 
include list for name_substr in self.kernel_names: if self._filter_string_matches(name_substr, name): _LOGGER.debug("Kernel {kernel} included due to filter string '{filt}'.".format( kernel = operation.procedural_name(), filt = name_substr)) enabled = True break # compare against the exclude list for name_substr in self.ignore_kernel_names: if self._filter_string_matches(name_substr, name): _LOGGER.debug("Kernel {kernel} ignored due to filter string '{filt}'.".format( kernel = operation.procedural_name(), filt = name_substr)) enabled = False break if len(self.kernel_filter_list) > 0: if self.filter_out_kernels(operation.procedural_name(), self.kernel_filter_list): _LOGGER.debug("Kernel {kernel} matched via kernel filter file.".format(kernel = operation.procedural_name())) enabled = True else: _LOGGER.debug("Kernel {kernel} culled due to no match in kernel filter file.".format(kernel = operation.procedural_name())) enabled = False # TODO: filter based on compute data type return enabled # # def append(self, operation): ''' Inserts the operation. operation_kind -> configuration_name -> [] ''' if self.filter(operation): self.selected_kernels.append(operation.procedural_name()) self.operations_by_name[operation.procedural_name()] = operation # add the configuration configuration_name = operation.configuration_name() # Split operations by minimum CC min_cc = operation.arch if operation.operation_kind not in self.operations.keys(): self.operations[operation.operation_kind] = {} if min_cc not in self.operations[operation.operation_kind]: self.operations[operation.operation_kind][min_cc] = {} if configuration_name not in self.operations[operation.operation_kind][min_cc].keys(): self.operations[operation.operation_kind][min_cc][configuration_name] = [] self.operations[operation.operation_kind][min_cc][configuration_name].append(operation) self.operation_count += 1 else: _LOGGER.debug("Culled {} from manifest".format(operation.procedural_name())) # def emit_manifest_cmake(self, manifest_path, top_level_path, source_files): with open(manifest_path, "w") as manifest_file: target_text = SubstituteTemplate("""cutlass_target_sources(cutlass_library_objs PRIVATE """, { }) manifest_file.write(target_text + '\n\n') manifest_file.write(" %s\n" % str(top_level_path.replace('\\', '/'))) generated_path = os.path.join(self.curr_build_dir, 'generated') for kind in self.operations.keys(): kind_str = OperationKindNames[kind] all_kind_file = os.path.join(generated_path, kind_str, f"all_{kind_str}_operations.cu").replace('\\', '/') manifest_file.write(f" {all_kind_file}\n") manifest_file.write(')\n\n') for kind in self.operations.keys(): for min_cc in sorted(self.operations[kind].keys()): for subclass in sorted(source_files[kind][min_cc].keys()): target_text = SubstituteTemplate("""cutlass_add_cutlass_library( SUFFIX ${kind}_sm${min_cc}_${subclass} """, { 'min_cc': str(min_cc), 'kind': OperationKindNames[kind], 'subclass': subclass }) manifest_file.write(target_text + '\n\n') for source_file in source_files[kind][min_cc][subclass]: manifest_file.write(" %s\n" % str(source_file.replace('\\', '/'))) manifest_file.write(")\n") if self.disable_full_archs_compilation: self.emit_disable_full_archs_compilation(manifest_file, source_files) def emit_disable_full_archs_compilation(manifest_file, source_files): def for_hopper(name): pass def for_ampere(name): return "16816" in name or \ "16832" in name or \ "16864" in name or \ ("1688" in name and "tf32" in name) def for_turing(name): return ("1688" in name and "tf32" not in 
name) or \ "8816" in name def for_volta(name): return "884" in name def is_cpp(name): return name.endswith(".cpp") def get_src_archs_str_given_requested_cuda_archs(archs, source_file): intersected_archs = archs & set(self.compute_capabilities) if intersected_archs == set(): raise RuntimeError( """ Empty archs set for file {} after taking the intersection of {} (global requested archs) and {} (per file requested archs) """.format(source_file, set(self.compute_capabilities), archs)) else: return " ".join(map(str, intersected_archs)) for min_cc in sorted(source_files.keys()): for source_file in source_files[min_cc]: if is_cpp(source_file): continue # skip because source is cpp elif for_ampere(source_file): archs_str = get_src_archs_str_given_requested_cuda_archs({80, 87, 90}, source_file) elif for_turing(source_file): archs_str = get_src_archs_str_given_requested_cuda_archs({75}, source_file) elif for_volta(source_file): archs_str = get_src_archs_str_given_requested_cuda_archs({70, 72}, source_file) else: raise RuntimeError("Per file archs are not set {}, as there is no rule specified for this file pattern".format(source_file)) manifest_file.write("cutlass_apply_cuda_gencode_flags({} SM_ARCHS {})\n".format(str(source_file.replace('\\', '/')), archs_str)) # def emit(self, target = GeneratorTarget.Library): operation_emitters = { GeneratorTarget.Library: EmitOperationKindLibrary } # Emitters for all operations that fall under a particular kind (e.g., GEMM, Conv2d) kind_emitters = { GeneratorTarget.Library: EmitOperationKindAll } interface_emitters = { GeneratorTarget.Library: EmitInterfaceLibrary } generated_path = os.path.join(self.curr_build_dir, 'generated') # create generated/ if os.path.exists(generated_path): shutil.rmtree(generated_path) os.mkdir(generated_path) with interface_emitters[target](generated_path, self.operation_count, self.args) as iface_emitter: top_level_path = iface_emitter.top_level_path for operation_kind in self.operations.keys(): iface_emitter.emit(OperationKindNames[operation_kind]) source_files = {} for kind in self.operations.keys(): source_files[kind] = {} for min_cc in self.operations[kind].keys(): source_files[kind][min_cc] = {} for operation_kind, ops in self.operations.items(): for min_cc, configurations in sorted(ops.items()): with operation_emitters[target](generated_path, min_cc, operation_kind, self.args) as operation_kind_emitter: for configuration_name, operations in configurations.items(): _LOGGER.info(f"Emitting {configuration_name} with {len(operations)} operation{'' if len(operations) == 1 else 's'}.") operation_kind_emitter.emit(configuration_name, operations) for subclass, files in operation_kind_emitter.source_files.items(): if subclass not in source_files[operation_kind][min_cc]: source_files[operation_kind][min_cc][subclass] = [] source_files[operation_kind][min_cc][subclass].extend(operation_kind_emitter.source_files[subclass]) # Emit top level all_{gemm, conv2d, ...}_operations.cu files with kind_emitters[target](generated_path, operation_kind, self.args) as operation_kind_emitter: operation_kind_emitter.emit(ops) # write the manifest.cmake file containing paths from all targets manifest_path = os.path.join(generated_path, "manifest.cmake") self.emit_manifest_cmake(manifest_path, top_level_path, source_files) ###################################################################################################
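# -----------------------------------------------------------------------------
# A standalone illustration of the '*' wildcard matching performed by
# Manifest._filter_string_matches above. The kernel name below is only an
# illustrative example of a CUTLASS procedural name, not taken from a
# generated manifest.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    def _demo_filter_string_matches(filter_string, haystack):
        # Mirrors Manifest._filter_string_matches: every '*'-separated
        # substring must appear in the haystack, in order.
        substrings = filter_string.split('*')
        for sub in substrings:
            idx = haystack.find(sub)
            if idx < 0:
                return False
            haystack = haystack[idx + len(sub):]
        return True

    name = "cutlass_tensorop_s1688gemm_128x128_32x2_nn_align4"
    assert _demo_filter_string_matches("cutlass*s1688gemm*nn", name)      # substrings appear in order
    assert not _demo_filter_string_matches("cutlass*nn*s1688gemm", name)  # wrong order -> rejected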
cutlass/python/cutlass_library/manifest.py
/* Copy buttons */ button.copybtn { position: absolute; display: flex; top: .3em; right: .3em; width: 1.7em; height: 1.7em; opacity: 0; transition: opacity 0.3s, border .3s, background-color .3s; user-select: none; padding: 0; border: none; outline: none; border-radius: 0.4em; /* The colors that GitHub uses */ border: #1b1f2426 1px solid; background-color: #f6f8fa; color: #57606a; } button.copybtn.success { border-color: #22863a; color: #22863a; } button.copybtn svg { stroke: currentColor; width: 1.5em; height: 1.5em; padding: 0.1em; } div.highlight { position: relative; } /* Show the copybutton */ .highlight:hover button.copybtn, button.copybtn.success { opacity: 1; } .highlight button.copybtn:hover { background-color: rgb(235, 235, 235); } .highlight button.copybtn:active { background-color: rgb(187, 187, 187); } /** * A minimal CSS-only tooltip copied from: * https://codepen.io/mildrenben/pen/rVBrpK * * To use, write HTML like the following: * * <p class="o-tooltip--left" data-tooltip="Hey">Short</p> */ .o-tooltip--left { position: relative; } .o-tooltip--left:after { opacity: 0; visibility: hidden; position: absolute; content: attr(data-tooltip); padding: .2em; font-size: .8em; left: -.2em; background: grey; color: white; white-space: nowrap; z-index: 2; border-radius: 2px; transform: translateX(-102%) translateY(0); transition: opacity 0.2s cubic-bezier(0.64, 0.09, 0.08, 1), transform 0.2s cubic-bezier(0.64, 0.09, 0.08, 1); } .o-tooltip--left:hover:after { display: block; opacity: 1; visibility: visible; transform: translateX(-100%) translateY(0); transition: opacity 0.2s cubic-bezier(0.64, 0.09, 0.08, 1), transform 0.2s cubic-bezier(0.64, 0.09, 0.08, 1); transition-delay: .5s; } /* By default the copy button shouldn't show up when printing a page */ @media print { button.copybtn { display: none; } }
cutlass/python/docs/_static/copybutton.css
{ "path": "./../../../../examples/python/00_basic_gemm.ipynb" }
cutlass/python/docs_src/source/externals/00_basic_gemm.nblink
################################################################################################# # # Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# """ Utilities for defining Conv2D problem sizes for testing. This file was ported from the C++ version in test/unit/conv/device/conv2d_problems.h """ from cutlass_library import ConvMode import cutlass from cutlass.shape import Conv2DProblemSize class TestbedConv2dProblemSizes: def __init__(self, minimum_channel_size: int): conv2d_default_sizes = self.initialize_conv2d_default_sizes(minimum_channel_size) conv2d_rigorous_sizes = self.initialize_conv2d_rigorous_sizes(minimum_channel_size) conv2d_resnet50_sizes = self.initialize_conv2d_resnet50_sizes(1) conv2d_resnet50_sizes_perf = self.initialize_conv2d_resnet50_sizes(34) grouped_sizes = self.initialize_conv2d_grouped_sizes() # Filter all problems self.all = [] for size_list in [conv2d_default_sizes, conv2d_rigorous_sizes, conv2d_resnet50_sizes, conv2d_resnet50_sizes_perf, grouped_sizes]: for size in size_list: if (size.C // size.groups) % minimum_channel_size == 0: self.all.append(size) def initialize_conv2d_default_sizes(self, minimum_channel_size): # Small input size x stride (1,1) # C < CTA::K and non-multiples of CTA::K. 
Typical CTA::K = {32, 64} conv2d_default_sizes = [] conv2d_default_sizes.append(Conv2DProblemSize( 1, 1, 1, minimum_channel_size, 8, 1, 1, minimum_channel_size, 1, 1, 1, 1, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 1, 1, 8, minimum_channel_size, 8, 1, 3, minimum_channel_size, 1, 1, 1, 1, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 1, 7, 8, minimum_channel_size, 8, 3, 3, minimum_channel_size, 1, 1, 1, 1, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 1, 7, 9, minimum_channel_size, 8, 4, 4, minimum_channel_size, 1, 1, 1, 1, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 2, 7, 9, minimum_channel_size, 8, 5, 5, minimum_channel_size, 1, 1, 1, 1, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 3, 7, 9, minimum_channel_size, 8, 6, 5, minimum_channel_size, 1, 1, 1, 1, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 3, 7, 9, minimum_channel_size, 8, 6, 6, minimum_channel_size, 1, 1, 1, 1, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 3, 7, 9, minimum_channel_size, 8, 7, 7, minimum_channel_size, 1, 1, 1, 1, 1, 1, )) ############################################## # Small input size x stride (2,2) # C < CTA::K and non-multiples of CTA::K. Typical CTA::K = {32, 64} ############################################## conv2d_default_sizes.append(Conv2DProblemSize( 1, 11, 7, minimum_channel_size, 8, 1, 1, minimum_channel_size, 0, 0, 2, 2, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 1, 11, 7, minimum_channel_size, 8, 3, 3, minimum_channel_size, 1, 1, 2, 2, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 1, 13, 11, minimum_channel_size, 8, 1, 1, minimum_channel_size, 1, 1, 2, 2, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 1, 17, 19, minimum_channel_size, 16, 2, 2, minimum_channel_size, 1, 1, 2, 2, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 1, 23, 5, minimum_channel_size, 16, 3, 3, minimum_channel_size, 1, 1, 2, 2, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 1, 13, 17, 8, 24, 3, 3, 8, 0, 0, 2, 2, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 1, 23, 21, 8, 24, 3, 3, 8, 1, 1, 3, 3, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 1, 20, 24, 8, 40, 3, 3, 8, 3, 3, 3, 3, 1, 1, )) ########################################## # Medium input size (1x16x16x128), filter size (1x1, 2x2, 3x3, 5x5), stride (1, 1) ########################################## conv2d_default_sizes.append(Conv2DProblemSize( 1, 15, 19, 160, 224, 1, 1, 160, 0, 0, 1, 1, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 1, 19, 37, 160, 224, 3, 3, 160, 1, 1, 2, 2, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 1, 16, 16, 160, 224, 2, 3, 160, 1, 1, 1, 1, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 1, 23, 21, 128, 224, 3, 3, 128, 1, 1, 1, 1, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 1, 29, 37, 160, 224, 5, 5, 160, 2, 2, 1, 1, 1, 1, )) ########################################## # C > CTA::K and non-multiples of CTA::K. 
Typical CTA::K = {32, 64} ########################################## conv2d_default_sizes.append(Conv2DProblemSize( 1, 15, 19, 32 + minimum_channel_size, 96, 3, 3, 32 + minimum_channel_size, 1, 1, 1, 1, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 1, 16, 24, 64 + minimum_channel_size, 96, 3, 3, 64 + minimum_channel_size, 1, 1, 1, 1, 1, 1, )) ########################################## # Medium input size, filter size (1x1, 3,x3, 5x5, 7x7), stride (2, 2) ########################################## conv2d_default_sizes.append(Conv2DProblemSize( 1, 13, 16, 288, 160, 5, 5, 288, 2, 2, 2, 2, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 1, 55, 51, 256, 512, 1, 1, 256, 0, 0, 2, 2, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 1, 71, 80, 32, 64, 5, 5, 32, 2, 2, 2, 2, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 1, 224, 224, 8, 64, 7, 7, 8, 3, 3, 2, 2, 1, 1, )) ########################################## # Medium input size stride (3, 3), filter (3, 3), non-default padding ########################################## conv2d_default_sizes.append(Conv2DProblemSize( 1, 27, 23, 256, 512, 3, 3, 256, 0, 0, 3, 3, 1, 1, )) ########################################## # Medium input size padding > stride, asymmetric filter, padding and striding ########################################## conv2d_default_sizes.append(Conv2DProblemSize( 1, 27, 31, 256, 512, 3, 3, 256, 5, 7, 3, 4, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 1, 27, 35, 256, 512, 7, 5, 256, 11, 7, 3, 5, 1, 1, )) ########################################## # Medium input size *mixed* stride (1, 2) and (2, 1), # filter (3, 3), default padding ########################################## conv2d_default_sizes.append(Conv2DProblemSize( 1, 27, 27, 256, 512, 3, 3, 256, 1, 1, 1, 2, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 1, 27, 27, 256, 512, 3, 3, 256, 1, 1, 2, 1, 1, 1, )) ######################################/ # Additional input size ######################################/ conv2d_default_sizes.append(Conv2DProblemSize( 3, 28, 28, 256, 256, 2, 2, 256, 0, 0, 2, 2, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 1, 32, 32, 16, 32, 3, 3, 16, 1, 1, 6, 2, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 32, 24, 32, 32, 32, 1, 2, 32, 0, 0, 1, 1, 1, 1, )) conv2d_default_sizes.append(Conv2DProblemSize( 4, 2, 3, 256, 328, 3, 5, 256, 1, 1, 1, 1, 1, 1, )) return conv2d_default_sizes # Add a few large and rigorous convolution problem sizes def initialize_conv2d_rigorous_sizes(self, minimum_channel_size): sizes = [] if False: sizes.append(Conv2DProblemSize.from_sizes( (1, 124, 224, 2 * minimum_channel_size), (24, 7, 7, 2 * minimum_channel_size), )) sizes.append(Conv2DProblemSize.from_sizes( (1, 233, 35, minimum_channel_size), (24, 7, 5, minimum_channel_size), )) return sizes # Add resent50 layers to unit testing sizes def initialize_conv2d_resnet50_sizes(self, batch_size): conv2d_problem_vector = [] conv2d_problem_vector.append(Conv2DProblemSize( batch_size, 56, 56, 64, 256, 1, 1, 64, 0, 0, 1, 1, 1, 1, )) conv2d_problem_vector.append(Conv2DProblemSize( batch_size, 56, 56, 64, 64, 1, 1, 64, 0, 0, 1, 1, 1, 1, )) conv2d_problem_vector.append(Conv2DProblemSize( batch_size, 56, 56, 64, 64, 3, 3, 64, 1, 1, 1, 1, 1, 1, )) conv2d_problem_vector.append(Conv2DProblemSize( batch_size, 56, 56, 256, 64, 1, 1, 256, 0, 0, 1, 1, 1, 1, )) conv2d_problem_vector.append(Conv2DProblemSize( batch_size, 56, 56, 256, 512, 1, 1, 256, 0, 0, 2, 2, 1, 1, )) conv2d_problem_vector.append(Conv2DProblemSize( batch_size, 56, 
56, 256, 128, 1, 1, 256, 0, 0, 2, 2, 1, 1, )) conv2d_problem_vector.append(Conv2DProblemSize( batch_size, 28, 28, 128, 128, 3, 3, 128, 1, 1, 1, 1, 1, 1, )) conv2d_problem_vector.append(Conv2DProblemSize( batch_size, 28, 28, 128, 512, 1, 1, 128, 0, 0, 1, 1, 1, 1, )) conv2d_problem_vector.append(Conv2DProblemSize( batch_size, 28, 28, 512, 128, 1, 1, 512, 0, 0, 1, 1, 1, 1, )) conv2d_problem_vector.append(Conv2DProblemSize( batch_size, 28, 28, 512, 1024, 1, 1, 512, 0, 0, 2, 2, 1, 1, )) conv2d_problem_vector.append(Conv2DProblemSize( batch_size, 28, 28, 512, 256, 1, 1, 512, 0, 0, 2, 2, 1, 1, )) conv2d_problem_vector.append(Conv2DProblemSize( batch_size, 14, 14, 256, 256, 3, 3, 256, 1, 1, 1, 1, 1, 1, )) conv2d_problem_vector.append(Conv2DProblemSize( batch_size, 14, 14, 256, 1024, 1, 1, 256, 0, 0, 1, 1, 1, 1, )) conv2d_problem_vector.append(Conv2DProblemSize( batch_size, 14, 14, 1024, 256, 1, 1, 1024, 0, 0, 1, 1, 1, 1, )) conv2d_problem_vector.append(Conv2DProblemSize( batch_size, 14, 14, 1024, 2048, 1, 1, 1024, 0, 0, 2, 2, 1, 1, )) conv2d_problem_vector.append(Conv2DProblemSize( batch_size, 14, 14, 1024, 512, 1, 1, 1024, 0, 0, 2, 2, 1, 1, )) conv2d_problem_vector.append(Conv2DProblemSize( batch_size, 7, 7, 512, 512, 3, 3, 512, 1, 1, 1, 1, 1, 1, )) conv2d_problem_vector.append(Conv2DProblemSize( batch_size, 7, 7, 512, 2048, 1, 1, 512, 0, 0, 1, 1, 1, 1, )) conv2d_problem_vector.append(Conv2DProblemSize( batch_size, 7, 7, 2048, 512, 1, 1, 2048, 0, 0, 1, 1, 1, 1, )) return conv2d_problem_vector def initialize_conv2d_grouped_sizes(self): threadblock_n = 128 threadblock_k = 32 sizes = [] ########################################## # One group calculated by one or multiple CTAs: k_per_group % CTA::N = 0 # One CTA calculates a single group ########################################## for cta_per_group_k in range(1, 4): for groups in range(2, 5): conv_k = cta_per_group_k * threadblock_n * groups sizes.append(Conv2DProblemSize( 1, 8, 8, threadblock_k * 2 * groups, conv_k, 3, 3, threadblock_k * 2, 1, 1, 1, 1, 1, 1, ConvMode.CrossCorrelation, 1, groups )) # Partial gemm_k: k_per_group == CTA::N && channels_per_group < CTA::K sizes.append(Conv2DProblemSize( 1, 8, 8, threadblock_k, threadblock_n * 2, 3, 3, threadblock_k // 2, 1, 1, 1, 1, 1, 1, ConvMode.CrossCorrelation, 1, 2 )) sizes.append(Conv2DProblemSize( 1, 56, 56, 696, 768, 3, 3, 232, 1, 1, 2, 2, 1, 1, ConvMode.CrossCorrelation, 1, 3 )) sizes.append(Conv2DProblemSize( 1, 14, 14, 1392, 1536, 3, 3, 232, 1, 1, 1, 1, 1, 1, ConvMode.CrossCorrelation, 1, 3 )) ########################################## # One CTA calculate multiple groups: CTA::N % k_per_group = 0 ########################################## # 2 groups per CTA sizes.append(Conv2DProblemSize( 1, 8, 8, threadblock_k * 4, threadblock_n, 3, 3, threadblock_k * 2, 1, 1, 1, 1, 1, 1, ConvMode.CrossCorrelation, 1, 2 )) # 2 groups per CTA and partial gemm_k sizes.append(Conv2DProblemSize( 1, 8, 8, threadblock_k, threadblock_n, 3, 3, threadblock_k // 2, 1, 1, 1, 1, 1, 1, ConvMode.CrossCorrelation, 1, 2 )) # 4 groups per CTA sizes.append(Conv2DProblemSize( 1, 8, 8, threadblock_k * 8, threadblock_n // 2, 3, 3, threadblock_k * 2, 1, 1, 1, 1, 1, 1, ConvMode.CrossCorrelation, 1, 4 )) # 4 groups per CTA and partial gemm_k sizes.append(Conv2DProblemSize( 1, 8, 8, threadblock_k * 2, threadblock_n // 2, 3, 3, threadblock_k // 2, 1, 1, 1, 1, 1, 1, ConvMode.CrossCorrelation, 1, 4 )) return sizes
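# -----------------------------------------------------------------------------
# A minimal consumption sketch for the class above. minimum_channel_size=8 is an
# illustrative choice (e.g., 8-element alignment for 16-bit data); the .all list
# and the .C/.groups fields are the ones used by the filtering logic above.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    problem_sizes = TestbedConv2dProblemSizes(minimum_channel_size=8)
    print(f"{len(problem_sizes.all)} Conv2d problem sizes collected")
    for ps in problem_sizes.all[:3]:
        # Every retained entry satisfies (C // groups) % minimum_channel_size == 0
        print("  channels:", ps.C, "groups:", ps.groups)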
cutlass/test/python/cutlass/conv2d/conv2d_problem_sizes.py
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Implicit GEMM for fused epilogue broadcast testbed Parallel split-k is not tested because we can just use regular conv kernel when we need to use parallel-splitk. Broadcast can happen in the reduction kernel. 
*/ #pragma once #include <fstream> #include "../../common/cutlass_unit_test.h" #include "cutlass/cutlass.h" #include "cutlass/conv/device/implicit_gemm_convolution.h" #include "cutlass/reduction/device/reduce_split_k.h" #include "cutlass/reduction/thread/reduction_operators.h" #include "conv2d_problems.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/device/tensor_compare.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/convolution.h" #include "cutlass/util/reference/device/convolution.h" #include "cutlass/core_io.h" #include "cutlass/util/tensor_view_io.h" #include "../cache_testbed_output.h" namespace test { namespace conv { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Conv2d> struct Conv2dWithBroadcastReferenceOp { using OutputOp = typename Conv2d::EpilogueOutputOp; using ElementCompute = typename OutputOp::ElementCompute; using ElementZ = typename OutputOp::ElementZ; using ElementT = typename OutputOp::ElementT; typename OutputOp::BinaryOp binary_op; typename OutputOp::ElementwiseOp elementwise_op; Conv2dWithBroadcastReferenceOp() { } void operator()(ElementZ &Z, ElementT &T, ElementCompute conv2d, ElementCompute bias) { ElementCompute t_full = binary_op(conv2d, bias); T = ElementT(t_full); ElementCompute z_full = elementwise_op(t_full); Z = ElementZ(z_full); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Fused testbed // // Y = CONV(AB, C) // // T[n, p, q, k] = ReductionOp(Y[n, p, q, k], Broadcast[k]) // // Z[n, p, q, k] = Elementwise(T[n, p, q, k]) // template < typename Conv2d, typename ReferenceOp, bool AddBroadcastFirst = false > class TestbedConv2dWithBroadcast { public: using ElementA = typename Conv2d::ElementA; using LayoutA = typename Conv2d::LayoutA; using ElementB = typename Conv2d::ElementB; using LayoutB = typename Conv2d::LayoutB; using ElementC = typename Conv2d::ElementC; using LayoutC = typename Conv2d::LayoutC; using ElementAccumulator = typename Conv2d::ElementAccumulator; using ElementCompute = typename Conv2d::ElementCompute; using EpilogueOutputOp = typename Conv2d::EpilogueOutputOp; using ElementZ = typename EpilogueOutputOp::ElementZ; using ElementT = typename EpilogueOutputOp::ElementT; using ElementVector = typename EpilogueOutputOp::ElementVector; static cutlass::conv::Operator const kConvolutionalOperator = Conv2d::kConvolutionalOperator; static const bool kAddBroadcastFirst = AddBroadcastFirst; static const bool kStoreT = EpilogueOutputOp::kStoreT; public: /// Initialization cutlass::Distribution::Kind init_A; cutlass::Distribution::Kind init_B; cutlass::Distribution::Kind init_C; uint64_t seed; cutlass::HostTensor<ElementA, LayoutA> tensor_A; cutlass::HostTensor<ElementB, LayoutB> tensor_B; cutlass::HostTensor<ElementC, LayoutC> tensor_C; cutlass::HostTensor<ElementAccumulator, LayoutC> tensor_C_reference; cutlass::HostTensor<ElementZ, LayoutC> tensor_Z_computed; cutlass::HostTensor<ElementZ, LayoutC> tensor_Z_reference; cutlass::HostTensor<ElementT, LayoutC> tensor_T_computed; cutlass::HostTensor<ElementT, LayoutC> tensor_T_reference; cutlass::HostTensor<ElementAccumulator, LayoutC> tensor_Y_reference; cutlass::HostTensor<ElementVector, LayoutC> tensor_Broadcast; // Input Broadcast public: TestbedConv2dWithBroadcast( cutlass::Distribution::Kind init_A_ = 
cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint64_t seed_ = 2080 ): init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } /// Helper to initialize a tensor view template <typename Element, typename Layout> void initialize_tensor( cutlass::TensorView<Element, Layout> view, cutlass::Distribution::Kind dist_kind, uint64_t seed) { if (dist_kind == cutlass::Distribution::Uniform) { int scope; int bits = cutlass::sizeof_bits<Element>::value; if (bits <= 8) { scope = 2; } else if (bits == 16) { if (cutlass::sizeof_bits<ElementAccumulator>::value <= 16) { scope = 3; } else { scope = 5; } } else { scope = 8; } cutlass::reference::host::TensorFillRandomUniform( view, seed, scope, -scope, 0); } else if (dist_kind == cutlass::Distribution::Identity) { cutlass::reference::host::TensorFillIdentity(view); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::host::TensorFillRandomGaussian(view, seed, 0, 0.5); } else if (dist_kind == cutlass::Distribution::Sequential) { cutlass::reference::host::BlockFillSequential(view.data(), view.capacity()); } else { } } void initialize( cutlass::conv::Conv2dProblemSize const &problem_size, uint64_t seed = 2019) { tensor_A.resize(implicit_gemm_tensor_a_extent(kConvolutionalOperator, problem_size)); tensor_B.resize(implicit_gemm_tensor_b_extent(kConvolutionalOperator, problem_size)); tensor_C.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size)); tensor_C_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size)); tensor_Z_computed.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size)); tensor_Z_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size)); tensor_T_computed.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size)); tensor_T_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size)); tensor_Y_reference.resize(implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size)); tensor_Broadcast.resize({ 1, 1, 1, implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size).c(), }); initialize_tensor(tensor_A.host_view(), init_A, seed); initialize_tensor(tensor_B.host_view(), init_B, seed * 17); initialize_tensor(tensor_C.host_view(), init_C, seed * 39); initialize_tensor(tensor_Broadcast.host_view(), init_C, seed * 39); for (int n = 0; n < tensor_C_reference.extent().n(); ++n) { for (int p = 0; p < tensor_C_reference.extent().h(); ++p) { for (int q = 0; q < tensor_C_reference.extent().w(); ++q) { for (int k = 0; k < tensor_C_reference.extent().c(); ++k) { tensor_C_reference.at({n, p, q, k}) = ElementAccumulator(tensor_C.at({n, p, q, k})); } } } } tensor_A.sync_device(); tensor_B.sync_device(); tensor_C.sync_device(); tensor_Broadcast.sync_device(); tensor_C_reference.sync_device(); tensor_Z_computed.sync_device(); tensor_Z_reference.sync_device(); tensor_T_computed.sync_device(); tensor_T_reference.sync_device(); tensor_Y_reference.sync_device(); } bool sufficient() const { // // Determine SMEM requirements and waive if not satisfied // size_t smem_size = sizeof(typename Conv2d::UnderlyingKernel::SharedStorage); cudaDeviceProp properties; int device_idx; cudaError_t result = cudaGetDevice(&device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDevice() API call failed."); } result = cudaGetDeviceProperties(&properties, 
device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDeviceProperties() failed"); } if (properties.sharedMemPerBlockOptin < smem_size) { return false; } return true; } /// Executes one test bool run( cutlass::conv::Conv2dProblemSize const &problem_size, cutlass::conv::SplitKMode const &split_k_mode = cutlass::conv::SplitKMode::kSerial, ElementCompute alpha = ElementCompute(1), ElementCompute beta = ElementCompute(1)) { // Waive test if insufficient CUDA device if (!sufficient()) { if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { std::cerr << "Test waived due to insufficient CUDA device." << std::endl; } return true; } #if 0 //display conv2d problem size for debugging std::cout << problem_size << std::endl << "alpha, beta: (" << alpha << ", " << beta << ")" << std::endl << "split_k_mode: " << ((split_k_mode == cutlass::conv::SplitKMode::kSerial) ? "(serial)" : "(parallel)") << std::endl << std::endl; #endif initialize(problem_size); // configure the operator Conv2d conv2d_op; typename Conv2d::Arguments conv2d_args( problem_size, tensor_A.device_ref(), tensor_B.device_ref(), tensor_C.device_ref(), tensor_Z_computed.device_ref(), {alpha, beta}, split_k_mode, tensor_Broadcast.device_data(), kStoreT ? tensor_T_computed.device_data() : nullptr, 0, // This must be zero implicit_gemm_tensor_c_extent(kConvolutionalOperator, problem_size).c() ); // initialize the kernel size_t workspace_size = Conv2d::get_workspace_size(conv2d_args); cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); cutlass::Status status = conv2d_op.initialize(conv2d_args, workspace.get()); if (status != cutlass::Status::kSuccess) { cudaError_t error = cudaGetLastError(); std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n"; return true; } // run conv2d operator status = conv2d_op(); EXPECT_TRUE(status == cutlass::Status::kSuccess); if (status != cutlass::Status::kSuccess) { return false; } bool passed = false; cudaError_t result = cudaDeviceSynchronize(); EXPECT_EQ(result, cudaSuccess) << " device reference error: " << cudaGetErrorString(result); tensor_T_computed.sync_host(); tensor_Z_computed.sync_host(); // // Reference check // // When kAddBroadcastFirst is true, add bias on the host ElementCompute beta_ref = kAddBroadcastFirst ? ElementCompute(0) : beta; #if CUTLASS_CONV_TEST_UNIT_REFERENCE_DEVICE_ENABLED cutlass::reference::device::Conv2d< ElementA, LayoutA, ElementB, LayoutB, ElementAccumulator, LayoutC, ElementAccumulator, ElementAccumulator >( kConvolutionalOperator, problem_size, tensor_A.device_ref(), tensor_B.device_ref(), tensor_C_reference.device_ref(), tensor_Y_reference.device_ref(), alpha, beta_ref); // sync host (copy device data to host) for dumping error output in case of mismatches tensor_Y_reference.sync_host(); #else cutlass::reference::host::Conv2d< ElementA, LayoutA, ElementB, LayoutB, ElementAccumulator, LayoutC, ElementAccumulator, ElementAccumulator >( kConvolutionalOperator, problem_size, tensor_A.host_ref(), tensor_B.host_ref(), tensor_C_reference.host_ref(), tensor_Y_reference.host_ref(), alpha, beta_ref); #endif ReferenceOp reference_op; // compute tensor Z and tensor T for (int n = 0; n < problem_size.N; ++n) { for (int p = 0; p < (kConvolutionalOperator == cutlass::conv::Operator::kFprop ? problem_size.P : problem_size.H); ++p) { for (int q = 0; q < (kConvolutionalOperator == cutlass::conv::Operator::kFprop ? problem_size.Q : problem_size.W); ++q) { for (int k = 0; k < (kConvolutionalOperator == cutlass::conv::Operator::kFprop ? 
problem_size.K : problem_size.C); ++k) { ElementZ z{}; ElementT t{}; ElementCompute accum = tensor_Y_reference.at({n, p, q, k}); ElementCompute bias = ElementCompute(tensor_Broadcast.at({0, 0, 0, k})); if (kAddBroadcastFirst) { reference_op(z, t, accum + bias, beta * ElementCompute(tensor_C_reference.at({n, p, q, k}))); } else { reference_op(z, t, accum, bias); } tensor_Z_reference.at({n, p, q, k}) = z; tensor_T_reference.at({n, p, q, k}) = t; } } } } if (kStoreT) { passed = cutlass::reference::host::TensorEquals( tensor_T_computed.host_view(), tensor_T_reference.host_view()); EXPECT_TRUE(passed); } passed = cutlass::reference::host::TensorEquals( tensor_Z_computed.host_view(), tensor_Z_reference.host_view()); EXPECT_TRUE(passed); if (!passed) { std::stringstream fname; fname << "error_Conv2d_ImplicitGemm_device_" << (split_k_mode == cutlass::conv::SplitKMode::kSerial ? "serial_reduction_" : "parallel_reduction_") << (Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kFprop ? "fprop_" : (Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kDgrad ? "dgrad_" : (Conv2d::kConvolutionalOperator == cutlass::conv::Operator::kDeconv ? "deconv_" : "wgrad_"))) << "nhwc_" << problem_size.N << "x" << problem_size.H << "x" << problem_size.W << "x" << problem_size.C << "_krsc_" << problem_size.K << "x" << problem_size.R << "x" << problem_size.S << "x" << problem_size.C << "_padding_" << problem_size.pad_h << "x" << problem_size.pad_w << "_stride_" << problem_size.stride_h << "x" << problem_size.stride_w << "_dilation_" << problem_size.dilation_h << "x" << problem_size.dilation_w << "_" << (problem_size.mode == cutlass::conv::Mode::kCrossCorrelation ? "xcorr_" : "conv_") << Conv2d::ThreadblockShape::kM << "x" << Conv2d::ThreadblockShape::kN << "x" << Conv2d::ThreadblockShape::kK << "_" << Conv2d::WarpShape::kM << "x" << Conv2d::WarpShape::kN << "x" << Conv2d::WarpShape::kK << ".txt"; std::cout << fname.str() << std::endl; std::ofstream results(fname.str()); results << problem_size << std::endl; results << "\nA:\n" << tensor_A.host_view() << "\n" << "\nB:\n" << tensor_B.host_view() << "\n" << "\nC:\n" << tensor_C.host_view() << "\n" << "\nBroadcast:\n" << tensor_Broadcast.host_view() << "\n" << "\nY reference:\n" << tensor_Y_reference.host_view() << "\n" << "\nT reference:\n" << tensor_T_reference.host_view() << "\n" << "\nT computed:\n" << tensor_T_computed.host_view() << "\n" << "\nZ reference:\n" << tensor_Z_reference.host_view() << "\n" << "\nZ computed:\n" << tensor_Z_computed.host_view() << "\n"; } return passed; } }; ///////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename ImplicitGemm, typename ReferenceOp = Conv2dWithBroadcastReferenceOp<ImplicitGemm>, bool AddBroadcastFirst = false> bool TestSpecificConv2dWithBroadcast( const Conv2dProblemVector & problem_sizes) { bool passed = true; // // Testbed object // TestbedConv2dWithBroadcast<ImplicitGemm, ReferenceOp, AddBroadcastFirst> testbed; // Sweep conv2d problem sizes (split-k-mode=kSerial, split-k-slice=1, alpha=1.0, beta=0.0) for(auto conv_problem : problem_sizes) { // // Test // // test mode = xcross passed = testbed.run( conv_problem, cutlass::conv::SplitKMode::kSerial); if (!passed) { return false; } // test mode = convolution passed = testbed.run( conv_problem.reset_mode(cutlass::conv::Mode::kConvolution), cutlass::conv::SplitKMode::kSerial); if (!passed) { return false; } } return true; } 
///////////////////////////////////////////////////////////////////////////////////////////////////////// // TestAllConv: Runs cutlass::conv::device::ImplicitGemmConvolution operator and compares it with reference // TestAllConv runs conv operator on default conv problem sizes from test::conv::device::TestbedConv2dProblemSizes // Additionally, each conv2d test can provide conv problem sizes (conv_test_sizes) and blacklist of sizes // (conv_blacklist_sizes) ///////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename ImplicitGemm, typename ReferenceOp = Conv2dWithBroadcastReferenceOp<ImplicitGemm>, bool AddBroadcastFirst = false, bool TestSplitK = true > bool TestAllConv2dWithBroadcast( const Conv2dProblemVector &conv_test_sizes = Conv2dProblemVector(), const Conv2dProblemVector &conv_blacklist_sizes = Conv2dProblemVector()) { bool passed = true; // // Testbed object // TestbedConv2dWithBroadcast<ImplicitGemm, ReferenceOp, AddBroadcastFirst> testbed; // // Get conv problem sizes to run conv operator // TestbedConv2dProblemSizes conv_problems(128/cutlass::sizeof_bits<typename ImplicitGemm::ElementA>::value); // Vector of conv2d problem sizes to avoid duplicate runs Conv2dProblemVector conv_tested_sizes; Conv2dProblemVector const *problem_vectors[] = { &conv_test_sizes, // run user specified sizes &conv_problems.conv2d_default_sizes, // run default and cudnn bug sizes &conv_problems.conv2d_resnet50_sizes, // run resnet50 sizes #if CUTLASS_CONV_UNIT_TEST_RIGOROUS_SIZE_ENABLED &conv_problems.conv2d_rigorous_sizes, // run large and rigorous sizes if enabled #endif }; // Sweep conv2d problem sizes (split-k-mode=kSerial, split-k-slice=1, alpha=1.0, beta=0.0) for (Conv2dProblemVector const * problem_vector : problem_vectors) { // Run conv testbed on default convolution sizes for(auto conv_problem : *problem_vector) { // Skip blacklist and avoid duplicate problem sizes if (std::find(conv_blacklist_sizes.begin(), conv_blacklist_sizes.end(), conv_problem) != conv_blacklist_sizes.end() || std::find(conv_tested_sizes.begin(), conv_tested_sizes.end(), conv_problem) != conv_tested_sizes.end()) { continue; } // // Procedurally disable certain cases // // CUTLASS DGRAD's *unity* stride specialization only support stride {1, 1} if ((ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDgrad || ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDeconv) && (ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport == cutlass::conv::StrideSupport::kUnity)) { if (!((conv_problem.stride_h == 1) && (conv_problem.stride_w == 1))) { continue; } } #if 0 // relax restrictions on analytic strided dgrad // CUTLASS DGRAD's *strided* specialization only support stride >= {2, 2} if ((ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDgrad || ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDeconv) && (ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport == cutlass::conv::StrideSupport::kStrided)) { if (((conv_problem.stride_h == 1) && (conv_problem.stride_w == 1))) { continue; } } #endif // // Test // // push back tested problem size to avoid re-running duplicates conv_tested_sizes.push_back(conv_problem); // test mode = xcross passed = testbed.run( conv_problem, cutlass::conv::SplitKMode::kSerial); if (!passed) { return false; } // test mode = convolution passed = testbed.run( conv_problem.reset_mode(cutlass::conv::Mode::kConvolution), cutlass::conv::SplitKMode::kSerial); if 
(!passed) { return false; } } } // CUTLASS DGRAD's *strided* specialization does not support split-k mode if ((ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDgrad || ImplicitGemm::kConvolutionalOperator == cutlass::conv::Operator::kDeconv) && (ImplicitGemm::UnderlyingKernel::Mma::IteratorA::kStrideSupport == cutlass::conv::StrideSupport::kStrided)) { passed = testbed.run( cutlass::conv::Conv2dProblemSize( {1, 56, 56, 8}, // input size (NHWC) {8, 1, 1, 8}, // filter size (KRSC) {0, 0, 0, 0}, // padding (pad_h, _, pad_w, _) {2, 2}, // stride (stride_h, stride_w) {1, 1}), // dilation (dilation_h, dilation_w) cutlass::conv::SplitKMode::kSerial, cutlass::from_real<typename ImplicitGemm::ElementCompute>(2.0), cutlass::from_real<typename ImplicitGemm::ElementCompute>(2.0)); if (!passed) { return false; } return passed; } if (!TestSplitK) return passed; // Sweep split-k-slice using serial and parallel reduction with non-unity alpha and non-zero beta for // a single conv2d problem size. Convolution unit tests take a long time to run so only sweep parameters // which are absolutely necessary to catch functional bugs. The below code does provide the option to sweep // alpha and beta for local testing, but only runs one value for alpha and beta. cutlass::conv::Conv2dProblemSize conv2d_split_k_test_size ( {1, 17, 11, 288}, // input size (NHWC) {160, 3, 3, 288}, // filter size (KRSC) {1, 1, 1, 1}, // padding (pad_h, _, pad_w, _) {1, 1}, // stride (stride_h, stride_w) {1, 1} // dilation (dilation_h, dilation_w) ); cutlass::conv::SplitKMode split_k_modes [] = { cutlass::conv::SplitKMode::kSerial }; int split_k_slices[] = { 1, 2, 3, 4, 201 }; double problem_alpha[] = { 2.0 }; double problem_beta[] = { 2.0 }; for (auto split_k_mode : split_k_modes) { for (auto split_k_slice : split_k_slices) { for (auto alpha : problem_alpha) { for (auto beta : problem_beta) { passed = testbed.run( conv2d_split_k_test_size.reset_split_k_slices(split_k_slice), split_k_mode, cutlass::from_real<typename ImplicitGemm::ElementCompute>(alpha), cutlass::from_real<typename ImplicitGemm::ElementCompute>(beta)); if (!passed) { return false; } } } } } return passed; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace conv } // namespace test
cutlass/test/unit/conv/device/conv2d_with_broadcast_testbed.h/0
{ "file_path": "cutlass/test/unit/conv/device/conv2d_with_broadcast_testbed.h", "repo_id": "cutlass", "token_count": 10107 }
47
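A minimal usage sketch for the broadcast testbed above (not part of the original file): a device unit test composes an implicit GEMM convolution type and hands it to the sweep helpers. The `Conv2dFprop` alias is hypothetical and stands for a fully composed cutlass::conv::device::ImplicitGemmConvolution instantiation defined by the including test.

// Sketch only: Conv2dFprop is assumed to be defined by the including test.
TEST(SM80_Device_Conv2d_Fprop_With_Broadcast_Example, DefaultSizes) {
  // Sweeps the default problem sizes with serial split-k, in both
  // cross-correlation and convolution modes, via TestAllConv2dWithBroadcast.
  EXPECT_TRUE((test::conv::device::TestAllConv2dWithBroadcast<Conv2dFprop>()));
}

A caller can instead restrict the sweep to an explicit problem vector through TestSpecificConv2dWithBroadcast<Conv2dFprop>(problem_sizes).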
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Implicit GEMM testbed for 3.x API */ #pragma once #include "cutlass/cutlass.h" #include "../../common/cutlass_unit_test.h" #include "cute/tensor.hpp" #include "cutlass/kernel_hardware_info.hpp" #include "cutlass/conv/convolution.h" #include "cutlass/conv/convnd_problem_shape.hpp" #include "thrust/universal_vector.h" #include "cutlass/util/distribution.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/packed_stride.hpp" #include "cutlass/util/reference/host/conv.hpp" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/reference/device/tensor_fill.h" #include "cutlass/util/reference/device/tensor_compare.h" #include "conv_problem_sizes.hpp" #include "../cache_testbed_output.h" #include <iostream> #include "cute/layout.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace test::conv::device { ///////////////////////////////////////////////////////////////////////////////////////////////// // Initializes a flat device buffer template <typename Element> static void initialize_values( thrust::universal_vector<Element>& dst_ptr, cutlass::Distribution::Kind dist_kind, uint64_t seed) { if (cutlass::Distribution::Uniform == dist_kind) { int scope; int bits = cutlass::sizeof_bits<Element>::value; if (bits <= 8) { scope = 2; } else if (bits == 16) { scope = 4; } else { scope = 8; } cutlass::reference::host::BlockFillRandomUniform( dst_ptr.data().get(), dst_ptr.size(), seed, scope, -scope, 0); } else if (cutlass::Distribution::Identity == dist_kind) { cutlass::reference::host::BlockFillRandomUniform( dst_ptr.data().get(), dst_ptr.size(), seed, 0, 0, 0); } else if (cutlass::Distribution::Gaussian == dist_kind) { cutlass::reference::host::BlockFillRandomGaussian(dst_ptr.data().get(), dst_ptr.size(), seed, 0, 0.5); } else if (cutlass::Distribution::Sequential == dist_kind) { cutlass::reference::host::BlockFillSequential(dst_ptr.data().get(), dst_ptr.size()); } else { std::cerr << "Invalid distribution kind!\n."; exit(1); } } ///////////////////////////////////////////////////////////////////////////////////////////////// template <class Conv> struct ConvTestbed { // Kernel data types using ElementA = typename Conv::ConvKernel::ElementA; using ElementB = typename Conv::ConvKernel::ElementB; using ElementC = cute::conditional_t<cute::is_void_v<typename Conv::ConvKernel::ElementC>, typename Conv::ConvKernel::ElementD, typename Conv::ConvKernel::ElementC>; using ElementD = typename Conv::ConvKernel::ElementD; using ElementAccumulator = typename Conv::ConvKernel::ElementAccumulator; // // FusionOperation derived types/queries // using FusionOp = typename Conv::EpilogueOutputOp; // fusion types are potentially void if the fusion is not supported // helper so we don't try to construct HostTensor with void type template <typename T, typename U = uint8_t> using non_void_t = cute::conditional_t<cute::is_void_v<T>, U, T>; using ElementScalar = typename FusionOp::ElementScalar; using ElementCompute = typename FusionOp::ElementCompute; using BiasType = typename cutlass::epilogue::collective::detail::IsThreadEpilogueOpWithBias<FusionOp>::type; using ElementBias = non_void_t<BiasType>; using ActivationType = non_void_t<typename 
cutlass::epilogue::collective::detail::IsThreadEpilogueOpWithActivation<FusionOp>::type, cutlass::epilogue::thread::Identity<ElementCompute>>; static constexpr bool IsActivationEnabled = cutlass::epilogue::collective::detail::IsThreadEpilogueOpWithActivation<FusionOp>::value; using ActivationFunctor = cute::conditional_t<IsActivationEnabled, ActivationType, cutlass::epilogue::thread::Identity<ElementCompute>>; static constexpr bool IsBiasEnabled = cutlass::epilogue::collective::detail::IsThreadEpilogueOpWithBias<FusionOp>::value && !cute::is_same_v<BiasType, void>; using StrideC = typename Conv::ConvKernel::StrideC; using StrideD = typename Conv::ConvKernel::StrideD; using ThreadEpilogueOp = typename Conv::ConvKernel::CollectiveEpilogue::ThreadEpilogueOp; static constexpr cutlass::conv::Operator ConvOp = Conv::DispatchPolicy::ConvOp; static constexpr int NumSpatialDimensions = Conv::NumSpatialDimensions; using ProblemShape = cutlass::conv::ConvProblemShape<ConvOp, NumSpatialDimensions>; using Schedule = typename Conv::DispatchPolicy::Schedule; /// Initialization cutlass::Distribution::Kind init_A = cutlass::Distribution::Uniform; cutlass::Distribution::Kind init_B = cutlass::Distribution::Uniform; cutlass::Distribution::Kind init_C = cutlass::Distribution::Uniform; cutlass::Distribution::Kind init_bias = cutlass::Distribution::Uniform; uint64_t seed = 6090; float epsilon = 0.0f; int split_p_slices = 1; thrust::universal_vector<ElementA> tensor_A; thrust::universal_vector<ElementB> tensor_B; thrust::universal_vector<ElementC> tensor_C; thrust::universal_vector<ElementD> tensor_D_computed; thrust::universal_vector<ElementD> tensor_D_reference; thrust::universal_vector<ElementBias> tensor_bias; thrust::universal_vector<ElementScalar> tensor_alpha; thrust::universal_vector<ElementScalar> tensor_beta; void initialize(ProblemShape const& problem_shape, uint64_t seed = 6090) { tensor_A.resize(sizeof(ElementA) * problem_shape.size_A()); tensor_B.resize(sizeof(ElementB) * problem_shape.size_B()); tensor_C.resize(sizeof(ElementC) * problem_shape.size_C()); tensor_D_computed.resize(sizeof(ElementD) * problem_shape.size_C()); tensor_D_reference.resize(sizeof(ElementD) * problem_shape.size_C()); tensor_bias.resize(sizeof(ElementBias) * cute::size(cute::get<0>(problem_shape.get_shape_B()))); initialize_values(tensor_A, init_A, seed); initialize_values(tensor_B, init_B, seed * 11); initialize_values(tensor_C, init_C, seed * 17); initialize_values(tensor_bias, init_bias, seed * 19); } // Determine SMEM requirements and waive if not satisfied bool sufficient() const { int device_idx; cudaError_t result = cudaGetDevice(&device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDevice() API call failed."); } int max_smem_size; result = cudaDeviceGetAttribute(&max_smem_size, cudaDevAttrMaxSharedMemoryPerBlockOptin, device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaDeviceGetAttribute() failed"); } return max_smem_size >= Conv::ConvKernel::SharedStorageSize; } /// Executes one test bool run( ProblemShape const& problem_shape, ElementScalar alpha = ElementScalar(1), ElementScalar beta = ElementScalar(0) ) { // Waive test if insufficient CUDA device if (!sufficient()) { if (CUTLASS_TEST_UNIT_ENABLE_WARNINGS) { std::cerr << "Test waived due to insufficient CUDA device.\n"; } return true; } initialize(problem_shape); cutlass::KernelHardwareInfo hw_info; cudaGetDevice(&hw_info.device_id); hw_info.sm_count = 
cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id); // configure the operator Conv conv_op; auto stride_C = StrideC{}; auto stride_D = StrideD{}; if constexpr (ConvOp == cutlass::conv::Operator::kWgrad) { stride_C = cutlass::make_cute_packed_stride( StrideC{}, problem_shape.shape_C, problem_shape.stride_C, ConvOp); stride_D = cutlass::make_cute_packed_stride( StrideD{}, problem_shape.shape_C, problem_shape.stride_C, ConvOp); } // Need to support non-packed output strides for fprop and dgrad kernel. else { cute::for_each(cute::make_seq<cute::rank<0>(StrideC{})>{}, [&](auto i) { cute::get<0, i>(stride_C) = problem_shape.stride_C[ProblemShape::RankT-2-i]; }); cute::for_each(cute::make_seq<cute::rank<0>(StrideD{})>{}, [&](auto i) { cute::get<0, i>(stride_D) = problem_shape.stride_C[ProblemShape::RankT-2-i]; }); } typename Conv::ConvKernel::TileScheduler::Arguments scheduler_args{}; auto args = typename Conv::Arguments { { problem_shape, tensor_A.data().get(), tensor_B.data().get(), }, // MainloopArguments { {}, tensor_C.data().get(), stride_C, tensor_D_computed.data().get(), stride_D, }, // EpilogueArguments hw_info, scheduler_args }; auto &fusion_args = args.epilogue.thread; fusion_args.alpha = alpha; fusion_args.beta = beta; if constexpr (IsBiasEnabled) { fusion_args.bias_ptr = tensor_bias.data().get(); } // Clamp bound if constexpr (cute::is_same_v<ActivationFunctor, cutlass::epilogue::thread::Clamp<ElementCompute>>) { fusion_args.activation.lower_bound = CUTLASS_STL_NAMESPACE::numeric_limits<ElementCompute>::lowest(); fusion_args.activation.upper_bound = CUTLASS_STL_NAMESPACE::numeric_limits<ElementCompute>::max(); } // Scale if constexpr (cute::is_same_v<ActivationFunctor, cutlass::epilogue::thread::ScaledGELU_taylor<ElementCompute>> || cute::is_same_v<ActivationFunctor, cutlass::epilogue::thread::ScaledGELU<ElementCompute>>) { fusion_args.activation.scale = ElementCompute{1}; } cutlass::Status status = cutlass::Status::kInvalid; status = conv_op.can_implement(args); EXPECT_EQ(conv_op.can_implement(args), cutlass::Status::kSuccess); if (status != cutlass::Status::kSuccess) { std::cerr << "can_implement failed for the given problem_shape: \n"; print(problem_shape); return false; } // find workspace requirement for parallel split-k reduction size_t workspace_size = Conv::get_workspace_size(args); thrust::universal_vector<uint8_t> workspace(workspace_size); status = conv_op.initialize(args, workspace.data().get()); if (status != cutlass::Status::kSuccess) { cudaError_t error = cudaGetLastError(); std::cerr << "This test is not supported: " << cudaGetErrorString(error) << "\n"; return true; } // run conv3d operator status = conv_op(); EXPECT_TRUE(status == cutlass::Status::kSuccess); if (status != cutlass::Status::kSuccess) { return false; } bool passed = false; cudaError_t result = cudaDeviceSynchronize(); EXPECT_EQ(result, cudaSuccess) << " Kernel execution error: " << cudaGetErrorString(result); // Create cute::Tensors using the logical rank-3 MNK multi-mode shapes the mainloop gives us auto shape_mA = cute::reverse(problem_shape.shape_A); auto shape_mB = cute::reverse(problem_shape.shape_B); auto shape_mC = cute::reverse(problem_shape.shape_C); auto shape_mBias = cute::make_shape(cute::size(cute::get<0>(problem_shape.get_shape_B()))); auto stride_mA = cute::reverse(problem_shape.stride_A); auto stride_mB = cute::reverse(problem_shape.stride_B); auto stride_mC = cute::reverse(problem_shape.stride_C); auto mA = make_tensor(tensor_A.data().get(), 
make_layout(shape_mA, stride_mA)); auto mB = make_tensor(tensor_B.data().get(), make_layout(shape_mB, stride_mB)); auto mC = make_tensor(tensor_C.data().get(), make_layout(shape_mC, stride_mC)); auto mD_ref = make_tensor(tensor_D_reference.data().get(), make_layout(shape_mC, stride_mC)); auto mD_computed = make_tensor(tensor_D_computed.data().get(), make_layout(shape_mC, stride_mC)); auto mBias = make_tensor(tensor_bias.data().get(), make_layout(shape_mBias)); auto mAlpha = make_tensor(tensor_alpha.data().get(), make_layout(shape_mBias)); auto mBeta = make_tensor(tensor_beta.data().get(), make_layout(shape_mBias)); cutlass::reference::host::ConvEpilogueFusionParams< ElementAccumulator, ElementScalar, ElementCompute, ElementC, ElementD, decltype(mAlpha), decltype(mBeta), decltype(mBias), ActivationFunctor> epilogue_fusion_params{}; epilogue_fusion_params.alpha = alpha; epilogue_fusion_params.beta = beta; if constexpr (IsBiasEnabled) { epilogue_fusion_params.tensor_bias = mBias; } auto padding = cute::reverse(problem_shape.lower_padding); auto tstride = cute::reverse(problem_shape.traversal_stride); auto dilation = cute::reverse(problem_shape.dilation); cutlass::reference::host::ConvReferenceImpl< ConvOp, NumSpatialDimensions, decltype(mA), decltype(mB), decltype(mC), decltype(mD_ref), decltype(padding), decltype(tstride), decltype(dilation), decltype(epilogue_fusion_params)> reference_impl(mA, mB, mC, mD_ref, padding, tstride, dilation, epilogue_fusion_params); // // Reference check - support caching results // CachedTestKey cached_test_key = CreateCachedConvNd3xTestKey< ProblemShape, ElementA, ElementB, ElementC, ElementD >( ConvOp, problem_shape, alpha, beta, tensor_A, tensor_B, tensor_C ); // // Look for the cached key // bool cached_result_loaded = false; CachedTestResult cached_test_result; std::string convnd_result_cache_name = std::string("cached_results_") + CUTLASS_TARGET_NAME + ".txt"; #if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) CachedTestResultListing cached_results(convnd_result_cache_name); auto cached = cached_results.find(cached_test_key); cached_result_loaded = cached.first; if (cached_result_loaded) { cached_test_result = cached.second; } #endif if (!cached_result_loaded) { // Compute reference reference_impl.compute_reference(); #if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) cached_test_result.D = TensorHash(tensor_D_reference); CachedTestResultListing cached_results(convnd_result_cache_name); cached_results.append(cached_test_key, cached_test_result); cached_results.write(convnd_result_cache_name); #endif } // if (!cached_result_loaded) #if (CUTLASS_TEST_ENABLE_CACHED_RESULTS) uint32_t tensor_D_computed_hash = TensorHash(tensor_D_computed); passed = (tensor_D_computed_hash == cached_test_result.D); // If hash fails, double check against reference implementation. 
if(!passed) { std::cerr << "Hash-based comparison unsuccessful for key:" << "\n" << cached_test_key << ", comparing with reference implementation now.\n"; if (cached_result_loaded) { // Compute reference reference_impl.compute_reference(); } // Validate kernel against reference passed = compare_reference(mD_ref, mD_computed, mA, mB, mAlpha, mBeta, mBias, this->epsilon); } #else // Validate kernel against reference passed = compare_reference(mD_ref, mD_computed, mA, mB, mAlpha, mBeta, mBias, this->epsilon); #endif EXPECT_TRUE(passed); return passed; } template< class Engine, class Layout, class EngineA, class LayoutA, class EngineB, class LayoutB, class EngineAlpha, class LayoutAlpha, class EngineBeta, class LayoutBeta, class EngineBias, class LayoutBias> static constexpr bool compare_reference( cute::Tensor<Engine, Layout> const& reference, cute::Tensor<Engine, Layout> const& computed, cute::Tensor<EngineA, LayoutA> const& A, cute::Tensor<EngineB, LayoutB> const& B, cute::Tensor<EngineAlpha, LayoutAlpha> const& tensor_alpha, cute::Tensor<EngineBeta, LayoutBeta> const& tensor_beta, cute::Tensor<EngineBias, LayoutBias> const& tensor_bias, float epsilon = 0.0f) { if (size(reference) != size(computed)) { return false; } bool passed = true; if (epsilon == 0.0f) { // fast refcheck w/o epsilon for (size_t i = 0; i < size_t(size(reference)); ++i) { if (reference(i) != computed(i)) { passed = false; break; } } } else { // refcheck with epsilon for (size_t i = 0; i < size_t(size(reference)); ++i) { auto ref = static_cast<float>(reference(i)); auto act = static_cast<float>(computed(i)); auto abs_error = std::abs(act - ref); auto rel_error = abs_error / (std::max(std::abs(act), std::abs(ref)) + 0.00001f); if (std::isnan(abs_error) || std::isnan(rel_error) || std::min(abs_error, rel_error) > epsilon) { passed = false; break; } } } #if CUTLASS_DEBUG_TRACE_LEVEL > 1 if (not passed) { cute::print("Reference:"); cute::print_tensor(reference); cute::print("\nComputed:"); cute::print_tensor(computed); cute::print("\n"); for (size_t i = 0; i < size_t(size(A)); ++i) { printf("[%ld]: A = %f\n", i, float(A(i))); } for (size_t i = 0; i < size_t(size(B)); ++i) { printf("[%ld]: B = %f\n", i, float(B(i))); } if constexpr (IsBiasEnabled) { for (size_t i = 0; i < size_t(size(tensor_bias)); ++i) { printf("[%ld]: bias = %f\n", i, float(tensor_bias(i))); } } for (size_t i = 0; i < size_t(size(reference)); ++i) { printf("[%ld]: ref = %f, computed = %f\n", i, float(reference(i)), float(computed(i))); } } #endif return passed; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename Conv> bool TestAllConv(double alpha = 1.0, double beta = 0.0, float epsilon = 0.0f) { using ElementScalar = typename Conv::EpilogueOutputOp::ElementScalar; bool passed = true; ConvTestbed<Conv> testbed; testbed.epsilon = epsilon; auto problem_vector = get_conv_problem_vector< Conv::NumSpatialDimensions, Conv::DispatchPolicy::ConvOp>(); for (auto conv_problem : problem_vector) { #if CUTLASS_DEBUG_TRACE_LEVEL > 0 print(conv_problem); #endif passed = testbed.run( conv_problem, cutlass::from_real<ElementScalar>(alpha), cutlass::from_real<ElementScalar>(beta)); if (!passed) { printf("Failed test for "); print(conv_problem); return false; } } return passed; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace test::conv::device /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/test/unit/conv/device_3x/testbed_conv.hpp/0
{ "file_path": "cutlass/test/unit/conv/device_3x/testbed_conv.hpp", "repo_id": "cutlass", "token_count": 8056 }
48
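The 3.x testbed above exposes a single templated entry point; a comparable sketch follows, where `Conv3xKernel` is a hypothetical device-level convolution type (for example a cutlass::conv::device::ConvUniversalAdapter instantiation) supplied by the including test.

// Sketch only: Conv3xKernel is assumed to be composed elsewhere in the test.
TEST(SM90_Device_Conv3d_Fprop_Example, DefaultProblemVector) {
  // Runs every shape returned by get_conv_problem_vector with alpha = 1,
  // beta = 0, and an exact (epsilon == 0) reference comparison.
  EXPECT_TRUE(test::conv::device::TestAllConv<Conv3xKernel>(1.0, 0.0));
}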
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Unit tests for the small matrix class. */ #include <iostream> #include "../common/cutlass_unit_test.h" #include "cutlass/matrix.h" #include "cutlass/core_io.h" ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(Matrix, elementwise_add) { using Matrix4x4 = cutlass::Matrix4x4<float>; Matrix4x4 A = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 }; Matrix4x4 B = A.transpose(); Matrix4x4 C = A.add(B * 2.125f); bool passed = true; for (int i = 0; i < 4; ++i) { for (int j = 0; j < 4; ++j) { float got = C.at(i, j); float expected = A.at(i, j) + A.at(j, i) * 2.125f; if (got != expected) { passed = false; } } } EXPECT_TRUE(passed); if (!passed) { std::cout << "A:\n" << A << "\n\nB:\n" << B << "\n\nC:\n" << C << std::endl; } } TEST(Matrix, elementwise_multiply) { using Matrix4x4 = cutlass::Matrix4x4<float>; Matrix4x4 A = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 }; Matrix4x4 B = A.transpose(); Matrix4x4 C = A.multiply(B); bool passed = true; for (int i = 0; i < 4; ++i) { for (int j = 0; j < 4; ++j) { float got = C.at(i, j); float expected = A.at(i, j) * A.at(j, i); if (got != expected) { passed = false; } } } EXPECT_TRUE(passed); if (!passed) { std::cout << "A:\n" << A << "\n\nB:\n" << B << "\n\nC:\n" << C << std::endl; } } TEST(Matrix, product_4x4_overloads) { using Matrix4x4 = cutlass::Matrix4x4<float>; Matrix4x4 A = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 }; Matrix4x4 B = { -1, -2, 0, 4, 1, 2, 1, 1, 3, 2, 1, 1, 1, 0, 8, 2 }; Matrix4x4 C = Matrix4x4::identity(); Matrix4x4 D = A * B + C; bool passed = true; for (int i = 0; i < 4; ++i) { for (int j = 0; j < 4; ++j) { float got = D.at(i, j); float expected = (i == j ? 
1.0f : 0); for (int k = 0; k < 4; ++k) { expected += A.at(i, k) * B.at(k, j); } if (got != expected) { passed = false; } } } EXPECT_TRUE(passed); if (!passed) { std::cout << "A:\n" << A << "\n\nB:\n" << B << "\n\nC:\n" << C << "\n\nD:\n" << D << std::endl; } } TEST(Matrix, product_4x4) { using Matrix4x4 = cutlass::Matrix4x4<float>; Matrix4x4 A = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 }; Matrix4x4 B = { -1, -2, 0, 4, 1, 2, 1, 1, 3, 2, 1, 1, 1, 0, 8, 2 }; Matrix4x4 C = Matrix4x4::identity(); // Compute product with optional source accumulator Matrix4x4 D = A.product(B, C); bool passed = true; for (int i = 0; i < 4; ++i) { for (int j = 0; j < 4; ++j) { float got = D.at(i, j); float expected = (i == j ? 1.0f : 0.0f); for (int k = 0; k < 4; ++k) { expected += A.at(i, k) * B.at(k, j); } if (got != expected) { passed = false; } } } EXPECT_TRUE(passed); if (!passed) { std::cout << "A:\n" << A << "\n\nB:\n" << B << "\n\nC:\n" << C << "\n\nD:\n" << D << std::endl; } for (int i = 0; i < 4; ++i) { for (int j = 0; j < 4; ++j) { float c = (i == j ? 1.0f : 0.0f); EXPECT_TRUE(A.row(i).dot(B.column(j)) + c == D.at(i, j)); } } } /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/test/unit/core/matrix.cu/0
{ "file_path": "cutlass/test/unit/core/matrix.cu", "repo_id": "cutlass", "token_count": 2184 }
49
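The matrix tests above hinge on cutlass::Matrix4x4 treating add()/multiply() as elementwise operations while product() and operator* compute the true matrix product with an optional accumulator; a small self-contained sketch of that distinction (illustrative only, not one of the original test cases):

#include "cutlass/matrix.h"

// Elementwise multiply of the identity stays the identity, while the matrix
// product I * I + I doubles the diagonal.
inline bool matrix4x4_semantics_example() {
  using Matrix4x4 = cutlass::Matrix4x4<float>;
  Matrix4x4 I = Matrix4x4::identity();
  Matrix4x4 hadamard = I.multiply(I);    // elementwise: still the identity
  Matrix4x4 gemm = I.product(I, I);      // I * I + I: 2 on the diagonal
  bool ok = true;
  for (int i = 0; i < 4; ++i) {
    ok = ok && (hadamard.at(i, i) == 1.0f) && (gemm.at(i, i) == 2.0f);
  }
  return ok;
}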
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include "cutlass_unit_test.h" #include <iostream> #include <iomanip> #include <utility> #include <type_traits> #include <vector> #include <numeric> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cute/tensor.hpp> using namespace cute; template <class ElementType, class SmemLayout> struct SharedStorage { cute::ArrayEngine<ElementType, cute::cosize_v<SmemLayout>> smem; }; template <class T, class TiledCopy, class GmemLayout, class SmemLayout> __global__ void test_tiled_cp_async_device_cute(T const* g_in, T* g_out, TiledCopy const tiled_copy, GmemLayout gmem_layout, SmemLayout smem_layout) { using namespace cute; extern __shared__ char shared_memory[]; using SharedStorage = SharedStorage<T, SmemLayout>; SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(shared_memory); auto thr_copy = tiled_copy.get_slice(threadIdx.x); Tensor gA = make_tensor(make_gmem_ptr(g_in), gmem_layout); Tensor gB = make_tensor(make_gmem_ptr(g_out), gmem_layout); // Construct SMEM tensor Tensor sA = make_tensor(make_smem_ptr(shared_storage.smem.begin()), smem_layout); auto tAgA = thr_copy.partition_S(gA); auto tAsA = thr_copy.partition_D(sA); #if 0 if (thread0()) { print("gA : "); print(gA.layout()); print("\n"); print("sA : "); print(sA.layout()); print("\n"); print("tAgA: "); print(tAgA.layout()); print("\n"); print("tAsA: "); print(tAsA.layout()); print("\n"); } #endif copy(tiled_copy, tAgA, tAsA); cp_async_fence(); cp_async_wait<0>(); __syncthreads(); // Store trivially smem -> gmem if (thread0()) { copy(sA, gB); } } template <class T, class TiledCopy, class GMEM_Layout, class SMEM_Layout> void test_tiled_cp_async( TiledCopy const tiled_copy, GMEM_Layout const& gmem_layout, SMEM_Layout const& smem_layout) { 
using namespace cute; // Allocate and initialize host test data size_t N = ceil_div(cosize(gmem_layout) * sizeof_bits<T>::value, 8); thrust::host_vector<T> h_in(N); Tensor hA_in = make_tensor(recast_ptr<T>(h_in.data()), gmem_layout); for (int i = 0; i < size(hA_in); ++i) { hA_in(i) = static_cast<T>(i % 13); } // Allocate and initialize device test data thrust::device_vector<T> d_in = h_in; thrust::device_vector<T> d_out(h_in.size(), T(-1)); // Launch int smem_size = int(sizeof(SharedStorage<T, decltype(smem_layout)>)); test_tiled_cp_async_device_cute<<<1, 128, smem_size>>>( reinterpret_cast<T const*>(raw_pointer_cast(d_in.data())), reinterpret_cast<T*> (raw_pointer_cast(d_out.data())), tiled_copy, gmem_layout, smem_layout); // Copy results back to host thrust::host_vector<T> h_out = d_out; Tensor hA_out = make_tensor(recast_ptr<T>(h_out.data()), gmem_layout); // Validate the results. Print only the first 3 errors. int count = 3; for (int i = 0; i < size(hA_out) && count > 0; ++i) { EXPECT_EQ(hA_in(i), hA_out(i)); if (hA_in(i) != hA_out(i)) { --count; } } } template <typename T, typename M, typename N, typename GMEM_STRIDE_TYPE, typename SMEM_LAYOUT, typename TILED_COPY> void test_cp_async_no_swizzle() { using namespace cute; auto smem_atom = SMEM_LAYOUT{}; auto smem_layout = tile_to_shape(smem_atom, Shape<M, N>{}); auto gmem_layout = make_layout(make_shape(M{}, N{}), GMEM_STRIDE_TYPE{}); test_tiled_cp_async<T>(TILED_COPY{}, gmem_layout, smem_layout); } template <typename T, typename M, typename N, typename GMEM_STRIDE_TYPE, typename SWIZZLE_ATOM, typename SMEM_LAYOUT, typename TILED_COPY> void test_cp_async_with_swizzle() { using namespace cute; auto swizzle_atom = SWIZZLE_ATOM{}; auto smem_atom = composition(swizzle_atom, SMEM_LAYOUT{}); auto smem_layout = tile_to_shape(smem_atom, Shape<M, N>{}); auto gmem_layout = make_layout(make_shape(M{}, N{}), GMEM_STRIDE_TYPE{}); test_tiled_cp_async<T>(TILED_COPY{}, gmem_layout, smem_layout); }
cutlass/test/unit/cute/ampere/tiled_cp_async_testbed.hpp/0
{ "file_path": "cutlass/test/unit/cute/ampere/tiled_cp_async_testbed.hpp", "repo_id": "cutlass", "token_count": 2168 }
50
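The cp.async testbed above leaves the tile shape, GMEM stride, SMEM atom, and TiledCopy as template parameters. One illustrative instantiation is sketched below; the 64x64 half_t tile and the 128-thread, 128-bit-per-thread copy layout are assumptions chosen to satisfy the copy's divisibility and alignment requirements, not one of the original test cases.

// Assumes this testbed header is included (it brings in cute and thrust).
TEST(SM80_CuTe_Ampere, Tiled_CpAsync_Example_NoSwizzle) {
  using T = cute::half_t;
  using SmemAtom = Layout<Shape<_8,_64>, Stride<_64,_1>>;   // K-major SMEM atom, no swizzle
  using GmemStride = GenRowMajor;                           // K-major GMEM
  using TiledCp = decltype(make_tiled_copy(
      Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, T>{},
      Layout<Shape<_16,_8>, Stride<_8,_1>>{},               // 128 threads
      Layout<Shape< _1,_8>>{}));                            // 8 x 16-bit = 128 bits per thread
  test_cp_async_no_swizzle<T, _64, _64, GmemStride, SmemAtom, TiledCp>();
}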
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include "cutlass_unit_test.h" #include "../hopper/tma_store_testbed.hpp" using namespace cute; using namespace cutlass::test; #if CUDA_12_0_SM90_FEATURES_SUPPORTED template <class T, class TmaType = T, class GMEM_Layout, class SMEM_Layout, class CTA_Tile> void test_tma_store(GMEM_Layout const& gmem_layout, SMEM_Layout const& smem_layout, CTA_Tile const& cta_tile) { using namespace cute; return test_tma_store<T, TmaType>(SM90_TMA_STORE{}, gmem_layout, smem_layout, cta_tile); } template <class T, class TmaType = T, class GMEM_Layout, class SMEM_Layout> void test_tma_store(GMEM_Layout const& gmem_layout, SMEM_Layout const& smem_layout) { using namespace cute; return test_tma_store<T, TmaType>(gmem_layout, smem_layout, product_each(shape(smem_layout))); } TEST(SM90_CuTe_Hopper, Tma_Load_1D) { Layout smem_layout = Layout<_256, _1>{}; { Layout gmem_layout = smem_layout; test_tma_store<int8_t>(gmem_layout, smem_layout); test_tma_store<half_t>(gmem_layout, smem_layout); test_tma_store< float>(gmem_layout, smem_layout); test_tma_store<double>(gmem_layout, smem_layout); } { Layout gmem_layout = make_layout(128, GenColMajor{}); test_tma_store<int8_t>(gmem_layout, smem_layout); test_tma_store<half_t>(gmem_layout, smem_layout); test_tma_store< float>(gmem_layout, smem_layout); test_tma_store<double>(gmem_layout, smem_layout); } } TEST(SM90_CuTe_Hopper, Tma_Store_32x32_Col) { Layout smem_layout = Layout<Shape<_32,_32>, Stride<_1,_32>>{}; { Layout gmem_layout = smem_layout; test_tma_store<int8_t>(gmem_layout, smem_layout); test_tma_store<half_t>(gmem_layout, smem_layout); test_tma_store< float>(gmem_layout, smem_layout); test_tma_store<double>(gmem_layout, smem_layout); } { Layout gmem_layout = make_layout(make_shape(32,32), 
GenColMajor{}); test_tma_store<int8_t>(gmem_layout, smem_layout); test_tma_store<half_t>(gmem_layout, smem_layout); test_tma_store< float>(gmem_layout, smem_layout); test_tma_store<double>(gmem_layout, smem_layout); } { Layout gmem_layout = make_layout(make_shape(32,32), make_stride(Int<1>{}, 1024)); test_tma_store<int8_t>(gmem_layout, smem_layout); test_tma_store<half_t>(gmem_layout, smem_layout); test_tma_store< float>(gmem_layout, smem_layout); test_tma_store<double>(gmem_layout, smem_layout); } } TEST(SM90_CuTe_Hopper, Tma_Store_32x32_Row) { Layout smem_layout = Layout<Shape<_32,_32>, Stride<_32,_1>>{}; { Layout gmem_layout = smem_layout; test_tma_store<int8_t>(gmem_layout, smem_layout); test_tma_store<half_t>(gmem_layout, smem_layout); test_tma_store< float>(gmem_layout, smem_layout); test_tma_store<double>(gmem_layout, smem_layout); } { Layout gmem_layout = make_layout(make_shape(32,32), GenRowMajor{}); test_tma_store<int8_t>(gmem_layout, smem_layout); test_tma_store<half_t>(gmem_layout, smem_layout); test_tma_store< float>(gmem_layout, smem_layout); test_tma_store<double>(gmem_layout, smem_layout); } { Layout gmem_layout = make_layout(make_shape(32,32), make_stride(1024, Int<1>{})); test_tma_store<int8_t>(gmem_layout, smem_layout); test_tma_store<half_t>(gmem_layout, smem_layout); test_tma_store< float>(gmem_layout, smem_layout); test_tma_store<double>(gmem_layout, smem_layout); } } template <class T, template <typename> typename SWIZZLE_ATOM> void test_tma_store_swizzle_atom_mn() { auto smem_layout = SWIZZLE_ATOM<T>{}; Layout gmem_layout = make_layout(make_shape(2*size<0>(smem_layout), 2*size<1>(smem_layout)), GenColMajor{}); return test_tma_store<T>(gmem_layout, smem_layout); } template <class T, template <typename> typename SWIZZLE_ATOM> void test_tma_store_swizzle_atom_k() { auto smem_layout = SWIZZLE_ATOM<T>{}; Layout gmem_layout = make_layout(make_shape(2*size<0>(smem_layout), 2*size<1>(smem_layout)), GenRowMajor{}); return test_tma_store<T>(gmem_layout, smem_layout); } TEST(SM90_CuTe_Hopper, Tma_Store_Swizzle_Atoms) { test_tma_store_swizzle_atom_mn<int8_t, GMMA::Layout_MN_SW128_Atom>(); test_tma_store_swizzle_atom_mn<half_t, GMMA::Layout_MN_SW128_Atom>(); test_tma_store_swizzle_atom_mn< float, GMMA::Layout_MN_SW128_Atom>(); test_tma_store_swizzle_atom_mn<double, GMMA::Layout_MN_SW128_Atom>(); test_tma_store_swizzle_atom_mn<int8_t, GMMA::Layout_MN_SW64_Atom>(); test_tma_store_swizzle_atom_mn<half_t, GMMA::Layout_MN_SW64_Atom>(); test_tma_store_swizzle_atom_mn< float, GMMA::Layout_MN_SW64_Atom>(); test_tma_store_swizzle_atom_mn<double, GMMA::Layout_MN_SW64_Atom>(); test_tma_store_swizzle_atom_mn<int8_t, GMMA::Layout_MN_SW32_Atom>(); test_tma_store_swizzle_atom_mn<half_t, GMMA::Layout_MN_SW32_Atom>(); test_tma_store_swizzle_atom_mn< float, GMMA::Layout_MN_SW32_Atom>(); test_tma_store_swizzle_atom_mn<double, GMMA::Layout_MN_SW32_Atom>(); test_tma_store_swizzle_atom_mn<int8_t, GMMA::Layout_MN_INTER_Atom>(); test_tma_store_swizzle_atom_mn<half_t, GMMA::Layout_MN_INTER_Atom>(); test_tma_store_swizzle_atom_mn< float, GMMA::Layout_MN_INTER_Atom>(); test_tma_store_swizzle_atom_mn<double, GMMA::Layout_MN_INTER_Atom>(); test_tma_store_swizzle_atom_k<int8_t, GMMA::Layout_K_SW128_Atom>(); test_tma_store_swizzle_atom_k<half_t, GMMA::Layout_K_SW128_Atom>(); test_tma_store_swizzle_atom_k< float, GMMA::Layout_K_SW128_Atom>(); test_tma_store_swizzle_atom_k<double, GMMA::Layout_K_SW128_Atom>(); test_tma_store_swizzle_atom_k<int8_t, GMMA::Layout_K_SW64_Atom>(); 
test_tma_store_swizzle_atom_k<half_t, GMMA::Layout_K_SW64_Atom>(); test_tma_store_swizzle_atom_k< float, GMMA::Layout_K_SW64_Atom>(); test_tma_store_swizzle_atom_k<double, GMMA::Layout_K_SW64_Atom>(); test_tma_store_swizzle_atom_k<int8_t, GMMA::Layout_K_SW32_Atom>(); test_tma_store_swizzle_atom_k<half_t, GMMA::Layout_K_SW32_Atom>(); test_tma_store_swizzle_atom_k< float, GMMA::Layout_K_SW32_Atom>(); test_tma_store_swizzle_atom_k<double, GMMA::Layout_K_SW32_Atom>(); test_tma_store_swizzle_atom_k<int8_t, GMMA::Layout_K_INTER_Atom>(); test_tma_store_swizzle_atom_k<half_t, GMMA::Layout_K_INTER_Atom>(); test_tma_store_swizzle_atom_k< float, GMMA::Layout_K_INTER_Atom>(); test_tma_store_swizzle_atom_k<double, GMMA::Layout_K_INTER_Atom>(); } template <class T, template <typename> typename SWIZZLE_ATOM> void test_tma_store_swizzle_tile_mn() { auto smem_layout = tile_to_shape(SWIZZLE_ATOM<T>{}, Shape<_128,_128>{}); Layout gmem_layout = make_layout(make_shape(2*size<0>(smem_layout), 2*size<1>(smem_layout)), GenColMajor{}); return test_tma_store<T>(gmem_layout, smem_layout); } template <class T, template <typename> typename SWIZZLE_ATOM> void test_tma_store_swizzle_tile_k() { auto smem_layout = tile_to_shape(SWIZZLE_ATOM<T>{}, Shape<_128,_128>{}); Layout gmem_layout = make_layout(make_shape(2*size<0>(smem_layout), 2*size<1>(smem_layout)), GenRowMajor{}); return test_tma_store<T>(gmem_layout, smem_layout); } TEST(SM90_CuTe_Hopper, Tma_Store_Swizzle_Tiles) { // Other T-types use too much smem test_tma_store_swizzle_tile_mn<int8_t, GMMA::Layout_MN_SW128_Atom>(); test_tma_store_swizzle_tile_mn<half_t, GMMA::Layout_MN_SW128_Atom>(); test_tma_store_swizzle_tile_mn<int8_t, GMMA::Layout_MN_SW64_Atom>(); test_tma_store_swizzle_tile_mn<half_t, GMMA::Layout_MN_SW64_Atom>(); test_tma_store_swizzle_tile_mn<int8_t, GMMA::Layout_MN_SW32_Atom>(); test_tma_store_swizzle_tile_mn<half_t, GMMA::Layout_MN_SW32_Atom>(); test_tma_store_swizzle_tile_mn<int8_t, GMMA::Layout_MN_INTER_Atom>(); test_tma_store_swizzle_tile_mn<half_t, GMMA::Layout_MN_INTER_Atom>(); test_tma_store_swizzle_tile_k<int8_t, GMMA::Layout_K_SW128_Atom>(); test_tma_store_swizzle_tile_k<half_t, GMMA::Layout_K_SW128_Atom>(); test_tma_store_swizzle_tile_k<int8_t, GMMA::Layout_K_SW64_Atom>(); test_tma_store_swizzle_tile_k<half_t, GMMA::Layout_K_SW64_Atom>(); test_tma_store_swizzle_tile_k<int8_t, GMMA::Layout_K_SW32_Atom>(); test_tma_store_swizzle_tile_k<half_t, GMMA::Layout_K_SW32_Atom>(); test_tma_store_swizzle_tile_k<int8_t, GMMA::Layout_K_INTER_Atom>(); test_tma_store_swizzle_tile_k<half_t, GMMA::Layout_K_INTER_Atom>(); } // Tensor by-mode TEST(SM90_CuTe_Hopper, Tma_Store_Tensor) { // 3-mode TMA { Layout gmem_layout = make_layout(make_shape(128, 64, 5)); auto cta_tile = Shape<_64, _32>{}; // GMEM Tiling: // Take 64-elem from m // Take 32-elem from k auto smem_layout = make_layout(Shape<_64,_32>{}); test_tma_store<half_t>(gmem_layout, smem_layout, cta_tile); } // 4-mode TMA { Layout gmem_layout = make_layout(make_shape(make_shape(80,40),make_shape(32,12))); auto cta_tile = Shape<Shape<_16,_8>,Shape<_32,_2>>{}; // GMEM Tiling: // Take 16-elem from m0, 8-elem from m1, // Take 32-elem from k0, 2-elem from k1 auto smem_layout = make_layout(Shape<_128,_64>{}); test_tma_store<half_t>(gmem_layout, smem_layout, cta_tile); } // 5-mode TMA { Layout gmem_layout = make_layout(make_shape(make_shape(32,32,32),make_shape(32,12))); auto cta_tile = Shape<Shape<_16,_4,_2>,Shape<_16,_2>>{}; // GMEM Tiling: // Take 4-elem from m0, 4-elem from m1, 5-elem from m2 // Take 32-elem 
from k0, 2-elem from k1 auto smem_layout = make_layout(Shape<_128,_32>{}); test_tma_store<half_t>(gmem_layout, smem_layout, cta_tile); } } // Tensor Multimode -- TMA with more than 5 modes in GMEM (packs residual modes into last TMA mode) TEST(SM90_CuTe_Hopper, Tma_Store_Tensor_Multimode) { { Layout gmem_layout = make_layout(make_shape(make_shape(32,3,2,2),make_shape(32,4,2))); auto cta_tile = Shape<Shape<_32>, Shape<_32,_2>>{}; // GMEM Tiling: // Take 32-elem from m0 // Take 32-elem from k0, 2-elem from k1 auto smem_layout = make_layout(Shape<_32,_64>{}); test_tma_store<half_t>(gmem_layout, smem_layout, cta_tile); } { Layout gmem_layout = make_layout(make_shape(make_shape(64,3,2,2),make_shape(32,4,2))); auto cta_tile = Shape<Shape<_32,_3>, Shape<_32,_2>>{}; // GMEM Tiling: // Take 32-elem from m0, 3-elem from m1 // Take 32-elem from k0, 2-elem from k1 auto smem_layout = make_layout(Shape<_96,_64>{}); test_tma_store<half_t>(gmem_layout, smem_layout, cta_tile); } { Layout gmem_layout = make_layout(make_shape(make_shape(64,3,2,3,2),make_shape(32,4,2,2))); auto cta_tile = Shape<Shape<_32>, Shape<_16,_2>>{}; // GMEM Tiling: // Take 32-elem from m0 // Take 16-elem from k0, 2-elem from k1 auto smem_layout = make_layout(Shape<_32,_32>{}); test_tma_store<half_t>(gmem_layout, smem_layout, cta_tile); } } #endif
cutlass/test/unit/cute/hopper/tma_store.cu/0
{ "file_path": "cutlass/test/unit/cute/hopper/tma_store.cu", "repo_id": "cutlass", "token_count": 5975 }
51
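One more case in the same pattern as the swizzle-atom tests above (illustrative only): the 64x64 CTA tile and the 128x128 GMEM extent are assumptions that keep the GMEM shape divisible by the SMEM tile, and the feature guard mirrors the one used in the original file.

#if CUDA_12_0_SM90_FEATURES_SUPPORTED
TEST(SM90_CuTe_Hopper, Tma_Store_Example_64x64_K_SW128) {
  // K-major 128B-swizzled GMMA atom tiled up to a 64x64 CTA tile.
  auto smem_layout = tile_to_shape(GMMA::Layout_K_SW128_Atom<half_t>{}, Shape<_64,_64>{});
  // Row-major (K-major) GMEM, twice the tile extent in each mode.
  Layout gmem_layout = make_layout(make_shape(128, 128), GenRowMajor{});
  test_tma_store<half_t>(gmem_layout, smem_layout);
}
#endif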
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cute/atom/mma_atom.hpp" #include "cute/atom/copy_atom.hpp" #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/arch/arch.h" #include "cutlass/arch/mma.h" #include "cutlass/layout/layout.h" #include "cutlass/gemm/dispatch_policy.hpp" #include "cutlass/gemm/collective/collective_mma.hpp" #include "cutlass/epilogue/collective/collective_builder.hpp" #include "cutlass/epilogue/collective/default_epilogue.hpp" #include "cutlass/epilogue/thread/linear_combination.h" namespace cutlass { namespace gemm { namespace device { using namespace cute; // This type is only intended to demonstrate porting 2.x kernels to 3.0 template< class OperatorClass, class ArchTag, class ElementA, class LayoutA, class ElementB, class LayoutB, class ElementC, class LayoutC, class ElementAccumulator> struct DefaultGemmConfigurationToCutlass3Types { static_assert(sizeof(ElementA) == 0, "No valid DefaultGemmConfigurationToCutlass3Types configuration exists."); }; /////////////////////////////////////////////////////////////////////////////// namespace detail { template <typename Element, typename Layout, int Alignment, int SizeK> struct DefaultGemm_TensorOpSm80_OperandA; template <typename Element, typename Layout, int Alignment, int SizeK> struct DefaultGemm_TensorOpSm80_OperandB; // // F16: 128-by-128-by-64 // /// Operand A - Row-major (K-Major) template <> struct DefaultGemm_TensorOpSm80_OperandA<half_t, layout::RowMajor, 8, 64> { // Smem using SmemLayoutAtom = decltype( composition(Swizzle<3,3,3>{}, Layout<Shape < _8,_64>, Stride<_64, _1>>{})); using SmemCopyAtom = Copy_Atom<SM75_U32x4_LDSM_N, half_t>; // Gmem using GmemTiledCopy = decltype( 
make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, half_t>{}, Layout<Shape <_16,_8>, Stride< _8,_1>>{}, Layout<Shape < _1,_8>>{})); }; /// Operand A - Column-major (M-major) template <int SizeK> struct DefaultGemm_TensorOpSm80_OperandA<half_t, layout::ColumnMajor, 8, SizeK> { // Smem using SmemLayoutAtom = decltype( composition(Swizzle<3,3,3>{}, Layout<Shape <_64, _8>, Stride< _1,_64>>{})); using SmemCopyAtom = Copy_Atom<SM75_U16x8_LDSM_T, half_t>; // Gmem using GmemTiledCopy = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, half_t>{}, Layout<Shape <_16, _8>, Stride< _1,_16>>{}, Layout<Shape < _8, _1>>{})); }; // Because the F32F16 TiledMMA is A-B symmetric, we can reuse the DefaultOperands // Operand B - Column-Major (K-major) template <int Alignment, int SizeK> struct DefaultGemm_TensorOpSm80_OperandB<half_t, layout::ColumnMajor, Alignment, SizeK> : DefaultGemm_TensorOpSm80_OperandA<half_t, layout::RowMajor, Alignment, SizeK> {}; // Operand B - Row-Major (N-major) template <int Alignment, int SizeK> struct DefaultGemm_TensorOpSm80_OperandB<half_t, layout::RowMajor, Alignment, SizeK> : DefaultGemm_TensorOpSm80_OperandA<half_t, layout::ColumnMajor, Alignment, SizeK> {}; // // F16: 128-by-128-by-32 (small k-block) // /// Operand A - Row-major (K-Major) template <> struct DefaultGemm_TensorOpSm80_OperandA<half_t, layout::RowMajor, 8, 32> { // Smem using SmemLayoutAtom = decltype( composition(Swizzle<2,3,3>{}, Layout<Shape < _8,_32>, Stride<_32, _1>>{})); using SmemCopyAtom = Copy_Atom<SM75_U32x4_LDSM_N, half_t>; // Gmem using GmemTiledCopy = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, half_t>{}, Layout<Shape <_32,_4>, Stride< _4,_1>>{}, Layout<Shape < _1,_8>>{})); }; } /////////////////////////////////////////////////////////////////////////////// // Ampere MMA F32F16 template <typename LayoutA, typename LayoutB, typename LayoutC> struct DefaultGemmConfigurationToCutlass3Types< arch::OpClassTensorOp, arch::Sm80, half_t, LayoutA, half_t, LayoutB, float, LayoutC, float> { using TileShape = Shape<_128, _128, _32>; static constexpr int ThreadCount = 128; using DispatchPolicy = MainloopSm80CpAsync<3>; using TiledMma = TiledMMA< MMA_Atom<SM80_16x8x16_F32F16F16F32_TN>, Layout<Shape<_2,_2,_1>>, // 2x2x1 thread group Tile<_32,_32,_16>>; // 32x32x16 MMA for LDSM, 1x2x1 value group // A static constexpr int kAlignmentA = 8; using DefaultOperandA = detail::DefaultGemm_TensorOpSm80_OperandA< half_t, LayoutA, kAlignmentA, 32>; using SmemLayoutAtomA = typename DefaultOperandA::SmemLayoutAtom; // M, K using SmemCopyAtomA = typename DefaultOperandA::SmemCopyAtom; using GmemTiledCopyA = typename DefaultOperandA::GmemTiledCopy; // B static constexpr int kAlignmentB = 8; using DefaultOperandB = detail::DefaultGemm_TensorOpSm80_OperandB< half_t, LayoutB, kAlignmentB, 32>; using SmemLayoutAtomB = typename DefaultOperandB::SmemLayoutAtom; // N, K using SmemCopyAtomB = typename DefaultOperandB::SmemCopyAtom; using GmemTiledCopyB = typename DefaultOperandB::GmemTiledCopy; // Mainloop using CollectiveMainloop = collective::CollectiveMma< DispatchPolicy, TileShape, half_t, TagToStrideA_t<LayoutA>, half_t, TagToStrideB_t<LayoutB>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B >; // Epilogue using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< TagToStrideC_t<LayoutC>, TagToStrideC_t<LayoutC>, 
epilogue::thread::LinearCombination<float, 1, float, float>, cutlass::gemm::EpilogueDefault>; }; /////////////////////////////////////////////////////////////////////////////// namespace detail { // // TF32: 128-by-128-by-kblock (kBlock = 16, 32) // /// Operand A - Row-major (K-major) (kBlock = 32) template <> struct DefaultGemm_TensorOpSm80_OperandA<tfloat32_t, layout::RowMajor, 4, 32> { // Smem using SmemLayoutAtom = decltype( composition(Swizzle<3,2,3>{}, Layout<Shape < _8,_32>, Stride<_32, _1>>{})); using SmemCopyAtom = Copy_Atom<SM75_U32x4_LDSM_N, tfloat32_t>; // Gmem using GmemTiledCopy = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, tfloat32_t>{}, Layout<Shape <_16,_8>, Stride< _8,_1>>{}, Layout<Shape < _1,_4>>{})); }; /// Operand A - Row-major (K-major) (kBlock = 16) template <> struct DefaultGemm_TensorOpSm80_OperandA<tfloat32_t, layout::RowMajor, 4, 16> { // Smem using SmemLayoutAtom = decltype( composition(Swizzle<2,2,3>{}, Layout<Shape < _8,_16>, Stride<_16, _1>>{})); using SmemCopyAtom = Copy_Atom<SM75_U32x4_LDSM_N, tfloat32_t>; // Gmem using GmemTiledCopy = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, tfloat32_t>{}, Layout<Shape <_32,_4>, Stride< _4,_1>>{}, Layout<Shape < _1,_4>>{})); }; /// Operand A - Column-major (M-major) template <int SizeK> struct DefaultGemm_TensorOpSm80_OperandA<tfloat32_t, layout::ColumnMajor, 4, SizeK> { // Smem using SmemLayoutAtom = decltype( composition(Swizzle<3,2,3>{}, Layout<Shape <_32, _8>, Stride< _1,_32>>{})); using SmemCopyAtom = Copy_Atom<UniversalCopy<tfloat32_t>, tfloat32_t>; // Gmem using GmemTiledCopy = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, tfloat32_t>{}, Layout<Shape <_16, _8>, Stride< _1,_16>>{}, Layout<Shape < _4, _1>>{})); }; // Because the TF32 TiledMMA is A-B symmetric, we can reuse the DefaultOperands // Operand B - Column-Major (K-major) template <int Alignment, int SizeK> struct DefaultGemm_TensorOpSm80_OperandB<tfloat32_t, layout::ColumnMajor, Alignment, SizeK> : DefaultGemm_TensorOpSm80_OperandA<tfloat32_t, layout::RowMajor, Alignment, SizeK> {}; // Operand B - Row-Major (N-major) template <int Alignment, int SizeK> struct DefaultGemm_TensorOpSm80_OperandB<tfloat32_t, layout::RowMajor, Alignment, SizeK> : DefaultGemm_TensorOpSm80_OperandA<tfloat32_t, layout::ColumnMajor, Alignment, SizeK> {}; } /////////////////////////////////////////////////////////////////////////////// // Ampere MMA F32TF32 template <typename LayoutA, typename LayoutB, typename LayoutC> struct DefaultGemmConfigurationToCutlass3Types< arch::OpClassTensorOp, arch::Sm80, tfloat32_t, LayoutA, tfloat32_t, LayoutB, float, LayoutC, float> { using TileShape = Shape<_128, _128, _32>; static constexpr int ThreadCount = 128; using DispatchPolicy = MainloopSm80CpAsync<3>; using TiledMma = TiledMMA< MMA_Atom<SM80_16x8x8_F32TF32TF32F32_TN>, Layout<Shape<_2,_2,_1>, Stride<_2, _1, _1>>, // 2x2x1 thread group Tile<_32,_32,_8>>; // 32x32x8 MMA for LDSM, 1x2x1 value group // A static constexpr int kAlignmentA = 4; using DefaultOperandA = detail::DefaultGemm_TensorOpSm80_OperandA< tfloat32_t, LayoutA, kAlignmentA, 32>; using SmemLayoutAtomA = typename DefaultOperandA::SmemLayoutAtom; // M, K using SmemCopyAtomA = typename DefaultOperandA::SmemCopyAtom; using GmemTiledCopyA = typename DefaultOperandA::GmemTiledCopy; // B static constexpr int kAlignmentB = 4; using DefaultOperandB = detail::DefaultGemm_TensorOpSm80_OperandB< tfloat32_t, LayoutB, kAlignmentB, 32>; 
using SmemLayoutAtomB = typename DefaultOperandB::SmemLayoutAtom; // N, K using SmemCopyAtomB = typename DefaultOperandB::SmemCopyAtom; using GmemTiledCopyB = typename DefaultOperandB::GmemTiledCopy; // Mainloop using CollectiveMainloop = collective::CollectiveMma< DispatchPolicy, TileShape, tfloat32_t, TagToStrideA_t<LayoutA>, tfloat32_t, TagToStrideB_t<LayoutB>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B >; // Epilogue using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< TagToStrideC_t<LayoutC>, TagToStrideC_t<LayoutC>, epilogue::thread::LinearCombination<float, 1, float, float>, cutlass::gemm::EpilogueDefault>; }; /////////////////////////////////////////////////////////////////////////////// template <typename LayoutC> struct DefaultGemmConfigurationToCutlass3Types< arch::OpClassTensorOp, arch::Sm80, int8_t, cutlass::layout::RowMajor, int8_t, cutlass::layout::ColumnMajor, int32_t, LayoutC, int32_t> { using TileShape = Shape<_128, _128, _64>; static constexpr int ThreadCount = 128; using DispatchPolicy = MainloopSm80CpAsync<3>; using TiledMma = TiledMMA< MMA_Atom<SM80_16x8x32_S32S8S8S32_TN>, Layout<Shape<_2,_2,_1>>, // 2x2x1 thread group Tile<_32,_32,_32>>; // 16x16x32 MMA for LDSM, 1x2x1 value group // A (M,K) K-major using SmemLayoutAtomA = decltype( composition( Swizzle<2,4,3>{}, Layout<Shape <_16,_64>, Stride<_64, _1>>{})); static constexpr int kAlignmentA = 16; using GmemTiledCopyA = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, int8_t>{}, Layout<Shape <_32,_4>, Stride< _4,_1>>{}, Layout<Shape<_1,Int<kAlignmentA>>>{})); // LDS.32- or LDSM-based copy atom // using SmemCopyAtomA = Copy_Atom<DefaultCopy, uint8_t>; using SmemCopyAtomA = Copy_Atom<SM75_U32x4_LDSM_N, uint8_t>; // LDSM works // B (N,K) K-major using SmemLayoutAtomB = decltype( composition( Swizzle<2,4,3>{}, Layout<Shape <_16,_64>, Stride<_64, _1>>{})); static constexpr int kAlignmentB = 16; using GmemTiledCopyB = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, int8_t>{}, Layout<Shape <_32,_4>, Stride< _4,_1>>{}, Layout<Shape<_1,Int<kAlignmentB>>>{})); // LDS.32- or LDSM-based copy atom // using SmemCopyAtomB = Copy_Atom<DefaultCopy, uint32_t>; using SmemCopyAtomB = Copy_Atom<SM75_U32x4_LDSM_N, uint8_t>; // LDSM works // Mainloop using CollectiveMainloop = collective::CollectiveMma< DispatchPolicy, TileShape, int8_t, TagToStrideA_t<cutlass::layout::RowMajor>, int8_t, TagToStrideB_t<cutlass::layout::ColumnMajor>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B >; using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< TagToStrideC_t<LayoutC>, TagToStrideC_t<LayoutC>, epilogue::thread::LinearCombination<int32_t, 1, int32_t, int32_t>, cutlass::gemm::EpilogueDefault>; }; /////////////////////////////////////////////////////////////////////////////// //////////////////////////// SIMT TWO STAGE /////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// namespace detail { template <typename Element, typename Layout, int ThreadCount, int ShapeM, int ShapeK> struct DefaultGemm_Simt_OperandA; /////////////////////////////////////////////////////////////////////////////// template <typename Element> struct DefaultGemm_Simt_OperandA<Element, layout::ColumnMajor, 256, 128, 8> { using 
SmemLayoutAtom = Layout<Shape <_128, _8>, Stride< _1,_128>>; using SmemCopyAtom = Copy_Atom<DefaultCopy, Element>; using GmemTiledCopy = decltype( make_tiled_copy(Copy_Atom<UniversalCopy<Element>, Element>{}, Layout<Shape <_32, _8>, Stride< _1,_32>>{}, Layout<Shape<_1,_1>>{})); }; template <typename Element> struct DefaultGemm_Simt_OperandA<Element, layout::RowMajor, 256, 128, 8> { using SmemLayoutAtom = Layout<Shape <_128, _8>, Stride< _1,Int<128 + 4>>>; // Padded using SmemCopyAtom = Copy_Atom<DefaultCopy, Element>; using GmemTiledCopy = decltype( make_tiled_copy(Copy_Atom<UniversalCopy<Element>, Element>{}, Layout<Shape <_32, _8>, Stride< _8, _1>>{}, Layout<Shape<_1,_1>>{})); }; template <typename Element, typename Layout, int ThreadCount, int ShapeN, int ShapeK> struct DefaultGemm_Simt_OperandB; template <typename Element, int ThreadCount, int ShapeN, int ShapeK> struct DefaultGemm_Simt_OperandB<Element, layout::ColumnMajor, ThreadCount, ShapeN, ShapeK> : DefaultGemm_Simt_OperandA<Element, layout::RowMajor, ThreadCount, ShapeN, ShapeK> {}; template <typename Element, int ThreadCount, int ShapeN, int ShapeK> struct DefaultGemm_Simt_OperandB<Element, layout::RowMajor, ThreadCount, ShapeN, ShapeK> : DefaultGemm_Simt_OperandA<Element, layout::ColumnMajor, ThreadCount, ShapeN, ShapeK> {}; } // end namespace detail // SIMT Two Stage template < class ArchTag, class ElementA, class LayoutA, class ElementB, class LayoutB, class ElementC, class LayoutC, class ElementAccumulator> struct DefaultGemmConfigurationToCutlass3Types< arch::OpClassSimt, ArchTag, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementAccumulator> { using TileShape = Shape<_128, _128, _8>; static constexpr int ThreadCount = 256; using DispatchPolicy = MainloopSm70TwoStage; using TiledMma = TiledMMA< MMA_Atom<UniversalFMA<ElementAccumulator, ElementA, ElementB, ElementC>>, Layout<Shape<_16, _16, _1>>>; // A static constexpr int kAlignmentA = 1; using DefaultOperandA = detail::DefaultGemm_Simt_OperandA<ElementA, LayoutA, ThreadCount, 128, 8>; using SmemLayoutAtomA = typename DefaultOperandA::SmemLayoutAtom; using SmemCopyAtomA = typename DefaultOperandA::SmemCopyAtom; using GmemTiledCopyA = typename DefaultOperandA::GmemTiledCopy; // B static constexpr int kAlignmentB = 1; using DefaultOperandB = detail::DefaultGemm_Simt_OperandB<ElementB, LayoutB, ThreadCount, 128, 8>; using SmemLayoutAtomB = typename DefaultOperandB::SmemLayoutAtom; using SmemCopyAtomB = typename DefaultOperandB::SmemCopyAtom; using GmemTiledCopyB = typename DefaultOperandB::GmemTiledCopy; // Mainloop using CollectiveMainloop = collective::CollectiveMma< DispatchPolicy, TileShape, ElementA, TagToStrideA_t<LayoutA>, ElementB, TagToStrideB_t<LayoutB>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B >; // Epilogue using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< TagToStrideC_t<LayoutC>, TagToStrideC_t<LayoutC>, epilogue::thread::LinearCombination<ElementC, 1, ElementAccumulator, ElementAccumulator>, cutlass::gemm::EpilogueDefault>; }; // // DP4A - int8 Proof-of-concept // // SIMT Two Stage TN - idp4a template < class ArchTag, class ElementC, class LayoutC> struct DefaultGemmConfigurationToCutlass3Types< arch::OpClassSimt, ArchTag, int8_t, cutlass::layout::RowMajor, int8_t, cutlass::layout::ColumnMajor, ElementC, LayoutC, int32_t> { using TileShape = Shape<_128, _128, _32>; static constexpr int ThreadCount = 256; using DispatchPolicy = 
MainloopSm70TwoStage; // NOTE: permuting MMA M mode lets us generate 128b smem loads (LDS.128) but has worst case bank conflicts using TiledMma = TiledMMA< MMA_Atom<SM61_DP4A>, Layout<Shape<_16,_16,_1>>>; // Tile of atoms (threads) // A (M,K) K-major using ElementA = int8_t; // 40% from regular M and N major layout // using SmemLayoutAtomA = Layout<Shape <_128,_32>, // Stride< _1,_128>>; // 80% from interleaved layouts using SmemLayoutAtomA = Layout<Shape <_128, Shape <_4, _8>>, Stride< _4, Stride<_1,_512>>>; using SmemCopyAtomA = Copy_Atom<DefaultCopy, ElementA>; static constexpr int kAlignmentA = 4; using GmemTiledCopyA = decltype( make_tiled_copy(Copy_Atom<UniversalCopy<cute::uint32_t>, ElementA>{}, Layout<Shape <_32,_8>, Stride< _8,_1>>{}, Layout<Shape < _1,_4>>{})); // B (N,K) K-major using ElementB = int8_t; // 40% from regular M and N major layout // using SmemLayoutAtomB = Layout<Shape <_128,_32>, // Stride< _1,_128>>; // 80% from interleaved layouts using SmemLayoutAtomB = Layout<Shape <_128, Shape <_4, _8>>, Stride< _4, Stride<_1,_512>>>; using SmemCopyAtomB = Copy_Atom<DefaultCopy, ElementB>; static constexpr int kAlignmentB = 4; using GmemTiledCopyB = decltype( make_tiled_copy(Copy_Atom<UniversalCopy<cute::uint32_t>, ElementB>{}, Layout<Shape <_32,_8>, Stride< _8,_1>>{}, Layout<Shape < _1,_4>>{})); // Mainloop using CollectiveMainloop = collective::CollectiveMma< DispatchPolicy, TileShape, ElementA, TagToStrideA_t<cutlass::layout::RowMajor>, ElementB, TagToStrideB_t<cutlass::layout::ColumnMajor>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B >; // Epilogue using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< TagToStrideC_t<LayoutC>, TagToStrideC_t<LayoutC>, epilogue::thread::LinearCombination<ElementC, 1, int32_t, int32_t>, cutlass::gemm::EpilogueDefault>; }; /////////////////////////////////////////////////////////////////////////////// // SIMT Two Stage NN - idp4a template < class ArchTag, class ElementC, class LayoutC> struct DefaultGemmConfigurationToCutlass3Types< arch::OpClassSimt, ArchTag, int8_t, cutlass::layout::ColumnMajor, int8_t, cutlass::layout::ColumnMajor, ElementC, LayoutC, int32_t> { using TileShape = Shape<_128, _128, _32>; static constexpr int ThreadCount = 256; using DispatchPolicy = MainloopSm70TwoStage; using TiledMma = TiledMMA< MMA_Atom<SM61_DP4A>, Layout<Shape<_16, _16, _1>>>; // A (M,K) M-major using ElementA = int8_t; using SmemLayoutAtomA = Layout<Shape <_128, Shape <_4, _8>>, Stride< _4, Stride<_1,_512>>>; using SmemCopyAtomA = Copy_Atom<DefaultCopy, ElementA>; static constexpr int kAlignmentA = 1; using GmemTiledCopyA = decltype( make_tiled_copy(Copy_Atom<UniversalCopy<cute::uint8_t>, ElementA>{}, Layout<Shape <_32, _8>, Stride< _1,_32>>{}, Layout<Shape < _1, _1>>{})); // B (N,K) K-major using ElementB = int8_t; using SmemLayoutAtomB = Layout<Shape <_128, Shape <_4, _8>>, Stride< _4, Stride<_1,_512>>>; using SmemCopyAtomB = Copy_Atom<DefaultCopy, ElementB>; static constexpr int kAlignmentB = 4; using GmemTiledCopyB = decltype( make_tiled_copy(Copy_Atom<UniversalCopy<cute::uint32_t>, ElementB>{}, Layout<Shape <_32,_8>, Stride< _8,_1>>{}, Layout<Shape < _1,_4>>{})); // Mainloop using CollectiveMainloop = collective::CollectiveMma< DispatchPolicy, TileShape, ElementA, TagToStrideA_t<cutlass::layout::ColumnMajor>, ElementB, TagToStrideB_t<cutlass::layout::ColumnMajor>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, 
cute::identity, // A GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B >; // Epilogue using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< TagToStrideC_t<LayoutC>, TagToStrideC_t<LayoutC>, epilogue::thread::LinearCombination<ElementC, 1, int32_t, int32_t>, cutlass::gemm::EpilogueDefault>; }; /////////////////////////////////////////////////////////////////////////////// // SIMT Two Stage NT - idp4a template < class ArchTag, class ElementC, class LayoutC> struct DefaultGemmConfigurationToCutlass3Types< arch::OpClassSimt, ArchTag, int8_t, cutlass::layout::ColumnMajor, int8_t, cutlass::layout::RowMajor, ElementC, LayoutC, int32_t> { using TileShape = Shape<_128, _128, _32>; static constexpr int ThreadCount = 256; using DispatchPolicy = MainloopSm70TwoStage; using TiledMma = TiledMMA< MMA_Atom<SM61_DP4A>, Layout<Shape<_16, _16, _1>>>; // A (M,K) M-major using ElementA = int8_t; using SmemLayoutAtomA = Layout<Shape <_128, Shape <_4, _8>>, Stride< _4, Stride<_1,_512>>>; using SmemCopyAtomA = Copy_Atom<DefaultCopy, ElementA>; static constexpr int kAlignmentA = 1; using GmemTiledCopyA = decltype( make_tiled_copy(Copy_Atom<UniversalCopy<cute::uint8_t>, ElementA>{}, Layout<Shape <_32, _8>, Stride< _1,_32>>{}, Layout<Shape < _1, _1>>{})); // B (N,K) N-major using ElementB = int8_t; using SmemLayoutAtomB = Layout<Shape <_128, Shape <_4, _8>>, Stride< _4, Stride<_1,_512>>>; using SmemCopyAtomB = Copy_Atom<DefaultCopy, ElementB>; static constexpr int kAlignmentB = 1; using GmemTiledCopyB = decltype( make_tiled_copy(Copy_Atom<UniversalCopy<cute::uint8_t>, ElementB>{}, Layout<Shape <_32, _8>, Stride< _1,_32>>{}, Layout<Shape < _1, _1>>{})); // Mainloop using CollectiveMainloop = collective::CollectiveMma< DispatchPolicy, TileShape, ElementA, TagToStrideA_t<cutlass::layout::ColumnMajor>, ElementB, TagToStrideB_t<cutlass::layout::RowMajor>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B >; // Epilogue using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< TagToStrideC_t<LayoutC>, TagToStrideC_t<LayoutC>, epilogue::thread::LinearCombination<ElementC, 1, int32_t, int32_t>, cutlass::gemm::EpilogueDefault>; }; /////////////////////////////////////////////////////////////////////////////// // SIMT Two Stage TT - idp4a template < class ArchTag, class ElementC, class LayoutC> struct DefaultGemmConfigurationToCutlass3Types< arch::OpClassSimt, ArchTag, int8_t, cutlass::layout::RowMajor, int8_t, cutlass::layout::RowMajor, ElementC, LayoutC, int32_t> { using TileShape = Shape<_128, _128, _32>; static constexpr int ThreadCount = 256; using DispatchPolicy = MainloopSm70TwoStage; using TiledMma = TiledMMA< MMA_Atom<SM61_DP4A>, Layout<Shape<_16, _16, _1>>>; // A (M,K) K-major using ElementA = int8_t; using SmemLayoutAtomA = Layout<Shape <_128, Shape <_4, _8>>, Stride< _4, Stride<_1,_512>>>; using SmemCopyAtomA = Copy_Atom<DefaultCopy, ElementA>; static constexpr int kAlignmentA = 4; using GmemTiledCopyA = decltype( make_tiled_copy(Copy_Atom<UniversalCopy<cute::uint32_t>, ElementA>{}, Layout<Shape <_32,_8>, Stride< _8,_1>>{}, Layout<Shape < _1,_4>>{})); // B (N,K) N-major using ElementB = int8_t; using SmemLayoutAtomB = Layout<Shape <_128, Shape <_4, _8>>, Stride< _4, Stride<_1,_512>>>; using SmemCopyAtomB = Copy_Atom<DefaultCopy, ElementB>; static constexpr int kAlignmentB = 1; using GmemTiledCopyB = decltype( make_tiled_copy(Copy_Atom<UniversalCopy<cute::uint8_t>, ElementB>{}, 
Layout<Shape <_32, _8>, Stride< _1,_32>>{}, Layout<Shape < _1, _1>>{})); // Mainloop using CollectiveMainloop = collective::CollectiveMma< DispatchPolicy, TileShape, ElementA, TagToStrideA_t<cutlass::layout::RowMajor>, ElementB, TagToStrideB_t<cutlass::layout::RowMajor>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B >; // Epilogue using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< TagToStrideC_t<LayoutC>, TagToStrideC_t<LayoutC>, epilogue::thread::LinearCombination<ElementC, 1, int32_t, int32_t>, cutlass::gemm::EpilogueDefault>; }; /////////////////////////////////////////////////////////////////////////////// /////////////////////////// SIMT MULTI STAGE ////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// // SIMT Multi Stage NT template < class ElementA, class ElementB, class ElementC, class LayoutC, class ElementAccumulator> struct DefaultGemmConfigurationToCutlass3Types< arch::OpClassSimt, arch::Sm80, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::RowMajor, ElementC, LayoutC, ElementAccumulator> { using TileShape = Shape<_128, _128, _16>; static constexpr int ThreadCount = 256; using DispatchPolicy = MainloopSm80CpAsync<3>; using TiledMma = TiledMMA< MMA_Atom<UniversalFMA<ElementAccumulator, ElementA, ElementB, ElementC>>, Layout<Shape<_16, _16, _1>>, // 16x16x1 thread group Tile<Layout<Shape<_16,_2>,Stride<_2,_1>>, // 32x32x1 MMA with perm for load vectorization Layout<Shape<_16,_2>,Stride<_2,_1>>,Underscore>>; // A (M,K) M-major using SmemLayoutAtomA = Layout<Shape<_128,_16>>; using SmemCopyAtomA = Copy_Atom<DefaultCopy, ElementA>; static constexpr int kAlignmentA = 2; using AlignmentTypeA = cute::uint_byte_t<static_cast<int>(sizeof(ElementA)) * kAlignmentA>; using GmemTiledCopyA = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<AlignmentTypeA>, ElementA>{}, Layout<Shape<_32,_8>>{}, Layout<Shape< _2,_1>>{})); // B (N,K) N-major using SmemLayoutAtomB = Layout<Shape<_128,_16>>; using SmemCopyAtomB = Copy_Atom<DefaultCopy, ElementB>; static constexpr int kAlignmentB = 2; using AlignmentTypeB = cute::uint_byte_t<static_cast<int>(sizeof(ElementB)) * kAlignmentB>; using GmemTiledCopyB = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<AlignmentTypeB>, ElementB>{}, Layout<Shape<_32,_8>>{}, Layout<Shape< _2,_1>>{})); // Mainloop using CollectiveMainloop = collective::CollectiveMma< DispatchPolicy, TileShape, ElementA, TagToStrideA_t<cutlass::layout::ColumnMajor>, ElementB, TagToStrideB_t<cutlass::layout::RowMajor>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B >; // Epilogue using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< TagToStrideC_t<LayoutC>, TagToStrideC_t<LayoutC>, epilogue::thread::LinearCombination<ElementC, 1, ElementAccumulator, ElementAccumulator>, cutlass::gemm::EpilogueDefault>; }; /////////////////////////////////////////////////////////////////////////////// // SIMT Multi Stage TN template < class ElementA, class ElementB, class ElementC, class LayoutC, class ElementAccumulator> struct DefaultGemmConfigurationToCutlass3Types< arch::OpClassSimt, arch::Sm80, ElementA, cutlass::layout::RowMajor, ElementB, cutlass::layout::ColumnMajor, ElementC, LayoutC, ElementAccumulator> { using TileShape = Shape<_128, _128, _16>; static constexpr int 
ThreadCount = 256; using DispatchPolicy = MainloopSm80CpAsync<3>; using TiledMma = TiledMMA< MMA_Atom<UniversalFMA<ElementAccumulator, ElementA, ElementB, ElementC>>, Layout<Shape<_16, _16, _1>>>; // A (M,K) K-major using SmemLayoutAtomA = Layout<Shape <_128, _16>, Stride< _1, Int<128 + 1>>>; // Padded by kAlignmentA using SmemCopyAtomA = Copy_Atom<DefaultCopy, ElementA>; static constexpr int kAlignmentA = 1; using GmemTiledCopyA = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<ElementA>, ElementA>{}, Layout<Shape <_16,_16>, Stride<_16, _1>>{})); // B (N,K) K-major using SmemLayoutAtomB = Layout<Shape <_128, _16>, Stride< _1, Int<128 + 1>>>; // Padded by kAlignmentB using SmemCopyAtomB = Copy_Atom<DefaultCopy, ElementB>; static constexpr int kAlignmentB = 1; using GmemTiledCopyB = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<ElementB>, ElementB>{}, Layout<Shape <_16,_16>, Stride<_16, _1>>{})); // Mainloop using CollectiveMainloop = collective::CollectiveMma< DispatchPolicy, TileShape, ElementA, TagToStrideA_t<cutlass::layout::RowMajor>, ElementB, TagToStrideB_t<cutlass::layout::ColumnMajor>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B >; // Epilogue using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< TagToStrideC_t<LayoutC>, TagToStrideC_t<LayoutC>, epilogue::thread::LinearCombination<ElementC, 1, ElementAccumulator, ElementAccumulator>, cutlass::gemm::EpilogueDefault>; }; /////////////////////////////////////////////////////////////////////////////// // SIMT Multi Stage NN template < class ElementA, class ElementB, class ElementC, class LayoutC, class ElementAccumulator> struct DefaultGemmConfigurationToCutlass3Types< arch::OpClassSimt, arch::Sm80, ElementA, cutlass::layout::ColumnMajor, ElementB, cutlass::layout::ColumnMajor, ElementC, LayoutC, ElementAccumulator> { using TileShape = Shape<_128, _128, _16>; static constexpr int ThreadCount = 256; using DispatchPolicy = MainloopSm80CpAsync<3>; using TiledMma = TiledMMA< MMA_Atom<UniversalFMA<ElementAccumulator, ElementA, ElementB, ElementC>>, Layout<Shape<_16, _16, _1>>, // 16x16x1 thread group Tile<Layout<Shape<_16,_2>,Stride<_2,_1>>,Underscore,Underscore>>; // 32x16x1 MMA with perm for load vectorization // A (M,K) M-major using SmemLayoutAtomA = Layout<Shape<_128,_16>>; using SmemCopyAtomA = Copy_Atom<DefaultCopy, ElementA>; static constexpr int kAlignmentA = 2; using AlignmentTypeA = cute::uint_byte_t<static_cast<int>(sizeof(ElementA)) * kAlignmentA>; using GmemTiledCopyA = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<AlignmentTypeA>, ElementA>{}, Layout<Shape<_32,_8>>{}, Layout<Shape< _2,_1>>{})); // B (N,K) K-major using SmemLayoutAtomB = Layout<Shape <_128, _16>, Stride< _1, Int<128 + 1>>>; // Padded by kAlignmentB using SmemCopyAtomB = Copy_Atom<DefaultCopy, ElementB>; static constexpr int kAlignmentB = 1; using GmemTiledCopyB = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<ElementB>, ElementB>{}, Layout<Shape <_16,_16>, Stride<_16, _1>>{})); // Mainloop using CollectiveMainloop = collective::CollectiveMma< DispatchPolicy, TileShape, ElementA, TagToStrideA_t<cutlass::layout::ColumnMajor>, ElementB, TagToStrideB_t<cutlass::layout::ColumnMajor>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B >; // Epilogue using CollectiveEpilogue = 
epilogue::collective::DefaultEpilogue< TagToStrideC_t<LayoutC>, TagToStrideC_t<LayoutC>, epilogue::thread::LinearCombination<ElementC, 1, ElementAccumulator, ElementAccumulator>, cutlass::gemm::EpilogueDefault>; }; /////////////////////////////////////////////////////////////////////////////// // SIMT Multi Stage TT template < class ElementA, class ElementB, class ElementC, class LayoutC, class ElementAccumulator> struct DefaultGemmConfigurationToCutlass3Types< arch::OpClassSimt, arch::Sm80, ElementA, cutlass::layout::RowMajor, ElementB, cutlass::layout::RowMajor, ElementC, LayoutC, ElementAccumulator> { using TileShape = Shape<_128, _128, _16>; static constexpr int ThreadCount = 256; using DispatchPolicy = MainloopSm80CpAsync<3>; using TiledMma = TiledMMA< MMA_Atom<UniversalFMA<ElementAccumulator, ElementA, ElementB, ElementC>>, Layout<Shape<_16, _16, _1>>, // 16x16x1 thread group Tile<Underscore,Layout<Shape<_16,_2>,Stride<_2,_1>>,Underscore>>; // 16x32x1 MMA with perm for load vectorization // A (M,K) K-major using SmemLayoutAtomA = Layout<Shape <_128, _16>, Stride< _1, Int<128 + 1>>>; // Padded by kAlignmentA using SmemCopyAtomA = Copy_Atom<DefaultCopy, ElementA>; static constexpr int kAlignmentA = 1; using GmemTiledCopyA = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<ElementA>, ElementA>{}, Layout<Shape <_16,_16>, Stride<_16, _1>>{})); // B (N,K) N-major using SmemLayoutAtomB = Layout<Shape <_128,_16>>; using SmemCopyAtomB = Copy_Atom<DefaultCopy, ElementB>; static constexpr int kAlignmentB = 2; using AlignmentTypeB = cute::uint_byte_t<static_cast<int>(sizeof(ElementB)) * kAlignmentB>; using GmemTiledCopyB = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<AlignmentTypeB>, ElementB>{}, Layout<Shape<_32,_8>>{}, Layout<Shape< _2,_1>>{})); // Mainloop using CollectiveMainloop = collective::CollectiveMma< DispatchPolicy, TileShape, ElementA, TagToStrideA_t<cutlass::layout::RowMajor>, ElementB, TagToStrideB_t<cutlass::layout::RowMajor>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B >; // Epilogue using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< TagToStrideC_t<LayoutC>, TagToStrideC_t<LayoutC>, epilogue::thread::LinearCombination<ElementC, 1, ElementAccumulator, ElementAccumulator>, cutlass::gemm::EpilogueDefault>; }; /////////////////////////////////////////////////////////////////////////////// // Ampere fp64 MMA TN (K-Major A and K-Major B) template <> struct DefaultGemmConfigurationToCutlass3Types< arch::OpClassTensorOp, arch::Sm80, double, cutlass::layout::RowMajor, double, cutlass::layout::ColumnMajor, double, cutlass::layout::ColumnMajor, double> { using TileShape = Shape<_128, _64, _16>; static constexpr int ThreadCount = 128; using DispatchPolicy = MainloopSm80CpAsync<3>; using TiledMma = TiledMMA< MMA_Atom<SM80_8x8x4_F64F64F64F64_TN>, // Atom Layout<Shape<_2,_2,_1>>, // Atom layout Tile<Layout<Shape<_16,_2>,Stride<_2,_1>>, // 32x32x4 MMA with perm for load vectorization Layout<Shape<_16,_2>,Stride<_2,_1>>, Underscore>>; // A (M,K) K-Major using SmemLayoutAtomA = decltype( composition(Swizzle<2,0,4>{}, Layout<Shape <_4,_16>, Stride<_1, _4>>{})); // M, K using SmemCopyAtomA = Copy_Atom<DefaultCopy, double>; static constexpr int kAlignmentA = 1; using GmemTiledCopyA = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<double>, double>{}, // CopyAtom Layout<Shape < _8,_16>, Stride<_16, _1>>{}, // ThrLayout for CopyAtom 
Layout<Shape<_1,_1>>{})); // Value layout: 1x1 doubles // B (N,K) K-Major using SmemLayoutAtomB = decltype( composition(Swizzle<2,0,4>{}, Layout<Shape <_4,_16>, Stride<_1, _4>>{})); // N, K using SmemCopyAtomB = Copy_Atom<DefaultCopy, double>; static constexpr int kAlignmentB = 1; using GmemTiledCopyB = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<double>, double>{}, // CopyAtom Layout<Shape < _8,_16>, Stride<_16, _1>>{}, // ThrLayout for CopyAtom Layout<Shape<_1,_1>>{})); // Value layout: 1x1 doubles // Mainloop using CollectiveMainloop = collective::CollectiveMma< DispatchPolicy, TileShape, double, TagToStrideA_t<cutlass::layout::RowMajor>, double, TagToStrideB_t<cutlass::layout::ColumnMajor>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B >; // Epilogue using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< TagToStrideC_t<cutlass::layout::ColumnMajor>, TagToStrideC_t<cutlass::layout::ColumnMajor>, epilogue::thread::LinearCombination<double, 1, double, double>, cutlass::gemm::EpilogueDefault>; /* using EpilogueOutputOp = epilogue::collective::Epilogue< epilogue::thread::LinearCombination<double, 1, double, double>, Layout<Shape <_64,_32>, Stride< _1,_64>>, // SMEM layout Copy_Atom<UniversalCopy<double>,double>, // R2S with tiled_mma layout decltype(make_tiled_copy(Copy_Atom<UniversalCopy<double>,double>{},// S2R Layout<Shape <_16,_16>, Stride< _1,_16>>{}, // Thread layout Layout<Shape<_2,_1>>{})), // Value layout Copy_Atom<UniversalCopy<double>,double> // R2G with S2R_dst layout >; */ }; /////////////////////////////////////////////////////////////////////////////// // Ampere fp64 MMA NN (M-Major A and K-Major B) template <> struct DefaultGemmConfigurationToCutlass3Types< arch::OpClassTensorOp, arch::Sm80, double, cutlass::layout::ColumnMajor, double, cutlass::layout::ColumnMajor, double, cutlass::layout::ColumnMajor, double> { using TileShape = Shape<_128, _64, _16>; static constexpr int ThreadCount = 128; using DispatchPolicy = MainloopSm80CpAsync<3>; using TiledMma = TiledMMA< MMA_Atom<SM80_8x8x4_F64F64F64F64_TN>, // Atom Layout<Shape<_2,_2,_1>>, // Atom layout Tile<Layout<Shape<_16,_2>,Stride<_2,_1>>, // 32x32x4 MMA with perm for load vectorization Layout<Shape<_16,_2>,Stride<_2,_1>>, Underscore>>; // A (M,K) M-Major using SmemLayoutAtomA = decltype( composition(Swizzle<2,2,2>{}, Layout<Shape <_16, _4>, Stride< _1,_16>>{})); // M, K using SmemCopyAtomA = Copy_Atom<DefaultCopy, double>; static constexpr int kAlignmentA = 2; using GmemTiledCopyA = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, double>{}, // CopyAtom Layout<Shape <_16, _8>, Stride< _1,_16>>{}, // ThrLayout for CopyAtom Layout<Shape<_2,_1>>{})); // Value layout: 2x1 doubles // B (N,K) K-Major using SmemLayoutAtomB = decltype( composition(Swizzle<2,0,4>{}, Layout<Shape <_4,_16>, Stride<_1, _4>>{}));// N, K using SmemCopyAtomB = Copy_Atom<DefaultCopy, double>; static constexpr int kAlignmentB = 1; using GmemTiledCopyB = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<double>, double>{}, // CopyAtom Layout<Shape < _8,_16>, Stride<_16, _1>>{}, // ThrLayout for CopyAtom Layout<Shape<_1,_1>>{})); // Value layout: 1x1 doubles // Mainloop using CollectiveMainloop = collective::CollectiveMma< DispatchPolicy, TileShape, double, TagToStrideA_t<cutlass::layout::ColumnMajor>, double, TagToStrideB_t<cutlass::layout::ColumnMajor>, TiledMma, GmemTiledCopyA, 
SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B >; // Epilogue using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< TagToStrideC_t<cutlass::layout::ColumnMajor>, TagToStrideC_t<cutlass::layout::ColumnMajor>, epilogue::thread::LinearCombination<double, 1, double, double>, cutlass::gemm::EpilogueDefault>; }; /////////////////////////////////////////////////////////////////////////////// // Ampere fp64 MMA NT (M-Major A and N-Major B) template <> struct DefaultGemmConfigurationToCutlass3Types< arch::OpClassTensorOp, arch::Sm80, double, cutlass::layout::ColumnMajor, double, cutlass::layout::RowMajor, double, cutlass::layout::ColumnMajor, double> { using TileShape = Shape<_128, _64, _16>; static constexpr int ThreadCount = 128; using DispatchPolicy = MainloopSm80CpAsync<3>; using TiledMma = TiledMMA< MMA_Atom<SM80_8x8x4_F64F64F64F64_TN>, // Atom Layout<Shape<_2,_2,_1>>, // Atom layout Tile<Layout<Shape<_16,_2>,Stride<_2,_1>>, // 32x32x4 MMA with perm for load vectorization Layout<Shape<_16,_2>,Stride<_2,_1>>, Underscore>>; // A (M,K) M-Major using SmemLayoutAtomA = decltype( composition(Swizzle<2,2,2>{}, Layout<Shape <_16, _4>, Stride< _1,_16>>{})); // M, K using SmemCopyAtomA = Copy_Atom<DefaultCopy, double>; static constexpr int kAlignmentA = 2; using GmemTiledCopyA = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, double>{}, // CopyAtom Layout<Shape <_16, _8>, Stride< _1,_16>>{}, // ThrLayout for CopyAtom Layout<Shape<_2,_1>>{})); // Value layout: 2x1 doubles // B (N,K) N-Major using SmemLayoutAtomB = decltype( composition(Swizzle<2,2,2>{}, Layout<Shape <_16, _4>, Stride< _1,_16>>{})); // N, K using SmemCopyAtomB = Copy_Atom<DefaultCopy, double>; static constexpr int kAlignmentB = 2; using GmemTiledCopyB = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, double>{}, // CopyAtom Layout<Shape <_16, _8>, Stride< _1,_16>>{}, // ThrLayout for CopyAtom Layout<Shape<_2,_1>>{})); // Value layout: 2x1 doubles // Mainloop using CollectiveMainloop = collective::CollectiveMma< DispatchPolicy, TileShape, double, TagToStrideA_t<cutlass::layout::ColumnMajor>, double, TagToStrideB_t<cutlass::layout::RowMajor>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B >; // Epilogue using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< TagToStrideC_t<cutlass::layout::ColumnMajor>, TagToStrideC_t<cutlass::layout::ColumnMajor>, epilogue::thread::LinearCombination<double, 1, double, double>, cutlass::gemm::EpilogueDefault>; }; /////////////////////////////////////////////////////////////////////////////// // Ampere fp64 MMA TT (K-Major A and N-Major B) template <> struct DefaultGemmConfigurationToCutlass3Types< arch::OpClassTensorOp, arch::Sm80, double, cutlass::layout::RowMajor, double, cutlass::layout::RowMajor, double, cutlass::layout::ColumnMajor, double> { using TileShape = Shape<_128, _64, _16>; static constexpr int ThreadCount = 128; using DispatchPolicy = MainloopSm80CpAsync<3>; using TiledMma = TiledMMA< MMA_Atom<SM80_8x8x4_F64F64F64F64_TN>, // Atom Layout<Shape<_2,_2,_1>>, // Atom layout Tile<Layout<Shape<_16,_2>,Stride<_2,_1>>, // 32x32x4 MMA with perm for load vectorization Layout<Shape<_16,_2>,Stride<_2,_1>>, Underscore>>; // A (M,K) K-Major using SmemLayoutAtomA = decltype( composition(Swizzle<2,0,4>{}, Layout<Shape <_4,_16>, Stride<_1, _4>>{})); // M, 
K using SmemCopyAtomA = Copy_Atom<DefaultCopy, double>; static constexpr int kAlignmentA = 1; using GmemTiledCopyA = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<double>, double>{}, // CopyAtom Layout<Shape < _8,_16>, Stride<_16, _1>>{}, // ThrLayout for CopyAtom Layout<Shape<_1,_1>>{})); // Value layout: 1x1 doubles // B (N,K) N-Major using SmemLayoutAtomB = decltype( composition(Swizzle<2,2,2>{}, Layout<Shape <_16, _4>, Stride< _1,_16>>{})); // N, K using SmemCopyAtomB = Copy_Atom<DefaultCopy, double>; static constexpr int kAlignmentB = 2; using GmemTiledCopyB = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, double>{}, // CopyAtom Layout<Shape <_16, _8>, Stride< _1,_16>>{}, // ThrLayout for CopyAtom Layout<Shape<_2,_1>>{})); // Value layout: 2x1 doubles // Mainloop using CollectiveMainloop = collective::CollectiveMma< DispatchPolicy, TileShape, double, TagToStrideA_t<cutlass::layout::RowMajor>, double, TagToStrideB_t<cutlass::layout::RowMajor>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B >; // Epilogue using CollectiveEpilogue = epilogue::collective::DefaultEpilogue< TagToStrideC_t<cutlass::layout::ColumnMajor>, TagToStrideC_t<cutlass::layout::ColumnMajor>, epilogue::thread::LinearCombination<double, 1, double, double>, cutlass::gemm::EpilogueDefault>; }; /////////////////////////////////////////////////////////////////////////////// // Hopper fp64 MMA TN template <> struct DefaultGemmConfigurationToCutlass3Types< arch::OpClassTensorOp, arch::Sm90, double, cutlass::layout::RowMajor, double, cutlass::layout::ColumnMajor, double, cutlass::layout::ColumnMajor, double> { using TileShape = Shape<_128, _64, _16>; static constexpr int ThreadCount = 128; using DispatchPolicy = MainloopSm80CpAsync<3>; using TiledMma = TiledMMA< MMA_Atom<SM90_16x8x16_F64F64F64F64_TN>, Layout<Shape<_2,_2,_1>>>; // A (M,K) K-major using SmemLayoutAtomA = decltype( make_ordered_layout(Shape<_128,_16>{}, Step < _2, _1>{})); // M, K using SmemCopyAtomA = Copy_Atom<DefaultCopy, double>; static constexpr int kAlignmentA = 2; using GmemTiledCopyA = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, double>{}, Layout<Shape <_16,_8>, Stride< _8,_1>>{}, Layout<Shape < _1,_2>>{})); // B (N,K) K-major using SmemLayoutAtomB = decltype( make_ordered_layout(Shape<_64,_16>{}, Step < _2, _1>{})); // N, K using SmemCopyAtomB = Copy_Atom<DefaultCopy, double>; static constexpr int kAlignmentB = 2; using GmemTiledCopyB = decltype( make_tiled_copy(Copy_Atom<SM80_CP_ASYNC_CACHEALWAYS<cute::uint128_t>, double>{}, Layout<Shape <_16,_8>, Stride< _8,_1>>{}, Layout<Shape < _1,_2>>{})); // Mainloop using CollectiveMainloop = collective::CollectiveMma< DispatchPolicy, TileShape, double, TagToStrideA_t<cutlass::layout::RowMajor>, double, TagToStrideB_t<cutlass::layout::ColumnMajor>, TiledMma, GmemTiledCopyA, SmemLayoutAtomA, SmemCopyAtomA, cute::identity, // A GmemTiledCopyB, SmemLayoutAtomB, SmemCopyAtomB, cute::identity // B >; // Epilogue using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape, Shape<_1,_1,_1>, cutlass::epilogue::collective::EpilogueTileAuto, double, double, double, cutlass::layout::ColumnMajor, 1, double, cutlass::layout::ColumnMajor, 1, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; }; 
/////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace gemm } // namespace cutlass
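///////////////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative only, not part of the original header): any of the
// DefaultGemmConfigurationToCutlass3Types specializations above can be composed
// into a runnable kernel through GemmUniversal and GemmUniversalAdapter, the same
// pattern the accompanying device tests use. The alias names (Config, GemmKernel,
// Gemm) and the choice of the Ampere fp64 TN specialization are assumptions made
// for this example; the consumer must also include
// "cutlass/gemm/kernel/gemm_universal.hpp" and
// "cutlass/gemm/device/gemm_universal_adapter.h".
/*
using Config = cutlass::gemm::device::DefaultGemmConfigurationToCutlass3Types<
    cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
    double, cutlass::layout::RowMajor,      // A: K-major
    double, cutlass::layout::ColumnMajor,   // B: K-major
    double, cutlass::layout::ColumnMajor,   // C
    double>;                                // accumulator

using GemmKernel = cutlass::gemm::kernel::GemmUniversal<
    cute::Shape<int,int,int,int>,           // problem shape (M,N,K,L)
    typename Config::CollectiveMainloop,
    typename Config::CollectiveEpilogue>;

using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
*/
///////////////////////////////////////////////////////////////////////////////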
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Tests for device-wide GEMM interface with stream-K scheduling */ #include <iostream> #include "cutlass/cutlass.h" #include "cute/tensor.hpp" #include "cute/atom/mma_atom.hpp" #include "cutlass/numeric_types.h" #include "cutlass/gemm/device/gemm_universal_adapter.h" #include "cutlass/gemm/kernel/gemm_universal.hpp" #include "cutlass/gemm/kernel/tile_scheduler.hpp" #include "cutlass/gemm/collective/collective_builder.hpp" #include "cutlass/epilogue/collective/collective_builder.hpp" #include "cutlass/epilogue/collective/sm70_epilogue_vectorized.hpp" #include "cutlass/epilogue/collective/default_epilogue.hpp" #include "cutlass/epilogue/thread/linear_combination.h" #include "../../common/cutlass_unit_test.h" #include "gemm_testbed_3x.hpp" #if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED) using namespace cute; TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_1x1x1) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::RowMajor; using ElementAccumulator = float; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_1,_1,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, ElementA, LayoutA, 8, ElementB, LayoutB, 8, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_1x2x1) { using ElementA = cutlass::half_t; using LayoutA = cutlass::layout::RowMajor; using ElementB = cutlass::half_t; using LayoutB = cutlass::layout::RowMajor; using ElementAccumulator = float; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_256,_128,_64>; using ClusterShape_MNK = Shape<_1,_2,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, ElementA, LayoutA, 8, ElementB, LayoutB, 8, ElementAccumulator, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, 
cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// /////////////////////////////// Cluster 2x2x1 //////////////////////////////// /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_2x2x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_2,_2,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x2x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_256,_128,_64>; using ClusterShape_MNK = Shape<_2,_2,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16n_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_2x2x1) { using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using 
LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_2,_2,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16n_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x2x1) { using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_256,_128,_64>; using ClusterShape_MNK = Shape<_1,_2,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// /////////////////////////////// Cluster 4x1x1 //////////////////////////////// /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_4x1x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_4,_1,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = 
typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_4x1x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_4,_1,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16n_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_4x1x1) { using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_4,_1,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); 
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16n_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_4x1x1) { using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_4,_1,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// /////////////////////////////// Cluster 1x4x1 //////////////////////////////// /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_1x4x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_1,_4,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_1x4x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_1,_4,_1>; using CollectiveMainloop = typename 
cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16n_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_1x4x1) { using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_1,_4,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16n_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 128x128x64_1x4x1) { using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_1,_4,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto 
>::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// /////////////////////////////// Cluster 2x4x1 //////////////////////////////// /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16t_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x4x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_256,_128,_64>; using ClusterShape_MNK = Shape<_2,_4,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x4x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_256,_128,_64>; using ClusterShape_MNK = Shape<_2,_4,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// 
TEST(SM90_Device_Gemm_f16n_f16t_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x4x1) { using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_256,_128,_64>; using ClusterShape_MNK = Shape<_2,_4,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } /////////////////////////////////////////////////////////////////////////////// TEST(SM90_Device_Gemm_f16n_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k, 256x128x64_2x4x1) { using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_256,_128,_64>; using ClusterShape_MNK = Shape<_2,_4,_1>; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAuto, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::collective::EpilogueScheduleAuto >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } TEST(SM90_Device_Gemm_f16t_f16n_f16n_tensor_op_gmma_f32_cooperative_stream_k_epilogue, 256x128x64_2x2x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_256,_128,_64>; using ClusterShape_MNK = Shape<_2,_2,_1>; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::TmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveMainloop = typename 
cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } TEST(SM90_Device_Gemm_f16t_f16n_f16t_tensor_op_gmma_f32_cooperative_stream_k_epilogue, 256x128x64_2x2x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using TileShape_MNK = Shape<_256,_128,_64>; using ClusterShape_MNK = Shape<_2,_2,_1>; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, cutlass::epilogue::TmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } TEST(SM90_Device_Gemm_f16t_f16n_f32n_tensor_op_gmma_f32_cooperative_stream_k_epilogue, 128x128x64_2x2x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_2,_2,_1>; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, float, LayoutC, 4, float, LayoutC, 4, cutlass::epilogue::TmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); 
EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_stream_k_epilogue, 128x128x64_2x2x1) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using TileShape_MNK = Shape<_128,_128,_64>; using ClusterShape_MNK = Shape<_2,_2,_1>; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, float, LayoutC, 4, float, LayoutC, 4, cutlass::epilogue::TmaWarpSpecializedCooperative >::CollectiveOp; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAll<Gemm>(1.0, 1.0)); } TEST(SM90_Device_Gemm_f16t_f16n_f32t_tensor_op_gmma_f32_cooperative_stream_k_epilogue, 256x128x64_2x2x1_BiasF32_ReLU) { using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::RowMajor; using TileShape_MNK = Shape<_256,_128,_64>; using ClusterShape_MNK = Shape<_2,_2,_1>; using EpilogueSchedule = cutlass::epilogue::TmaWarpSpecializedCooperative; using FusionOperation = cutlass::epilogue::fusion::LinCombPerRowBiasEltActAux< LayoutC, cutlass::epilogue::thread::ReLu, cutlass::half_t, float, cutlass::half_t, float>; using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, TileShape_MNK, ClusterShape_MNK, cutlass::epilogue::collective::EpilogueTileAuto, float, float, cutlass::half_t, LayoutC, 8, cutlass::half_t, LayoutC, 8, EpilogueSchedule, FusionOperation >::CollectiveOp; using CollectiveMainloop = typename cutlass::gemm::collective::CollectiveBuilder< cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp, cutlass::half_t, LayoutA, 8, cutlass::half_t, LayoutB, 8, float, TileShape_MNK, ClusterShape_MNK, cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(sizeof(typename CollectiveEpilogue::SharedStorage))>, cutlass::gemm::KernelTmaWarpSpecializedCooperative >::CollectiveOp; using GemmKernel = cutlass::gemm::kernel::GemmUniversal< Shape<int,int,int,int>, CollectiveMainloop, CollectiveEpilogue, cutlass::gemm::StreamKScheduler >; using Gemm = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>; EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>(1.0, 0.0)); EXPECT_TRUE(test::gemm::device::TestAllBiasElementwise<Gemm>(1.0, 1.0)); } #endif // defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
cutlass/test/unit/gemm/device/sm90_gemm_f16_f16_f16_tensor_op_f32_cooperative_stream_k.cu
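All of the stream-K tests above follow one template: build a CollectiveMainloop and CollectiveEpilogue with the SM90 CollectiveBuilder, compose them into GemmUniversal with the StreamKScheduler, wrap the kernel in GemmUniversalAdapter, and hand the resulting Gemm type to TestAll with two alpha/beta pairs. For readers who want to drive such a Gemm type directly rather than through the test harness, the sketch below shows one plausible host-side launcher. It is a minimal sketch, not code from this repository: the helper name run_stream_k_gemm, the problem extents, and the device pointers are hypothetical, and the Arguments initializer follows the shape used by the CUTLASS 3.x examples (mode, problem shape, mainloop operands, epilogue operands), which is assumed to apply unchanged to the kernels built here.

```cpp
// Minimal sketch (see caveats above). Assumes `Gemm` is one of the
// GemmUniversalAdapter types defined in the tests in this file, and that
// A/B/C/D buffers of the right extents already live in device memory.
#include "cutlass/util/device_memory.h"   // cutlass::device_memory::allocation
#include "cutlass/util/packed_stride.hpp" // cutlass::make_cute_packed_stride

template <class Gemm>
cutlass::Status run_stream_k_gemm(
    typename Gemm::ElementA const *ptr_A,
    typename Gemm::ElementB const *ptr_B,
    typename Gemm::ElementC const *ptr_C,   // C and D share an element type in these tests
    typename Gemm::ElementC       *ptr_D,
    int M, int N, int K, int L = 1) {

  // Packed strides consistent with the layouts the collectives were built for.
  auto stride_A = cutlass::make_cute_packed_stride(
      typename Gemm::GemmKernel::StrideA{}, cute::make_shape(M, K, L));
  auto stride_B = cutlass::make_cute_packed_stride(
      typename Gemm::GemmKernel::StrideB{}, cute::make_shape(N, K, L));
  auto stride_C = cutlass::make_cute_packed_stride(
      typename Gemm::GemmKernel::StrideC{}, cute::make_shape(M, N, L));
  auto stride_D = cutlass::make_cute_packed_stride(
      typename Gemm::GemmKernel::StrideD{}, cute::make_shape(M, N, L));

  typename Gemm::Arguments args{
    cutlass::gemm::GemmUniversalMode::kGemm,
    {M, N, K, L},                                    // problem shape
    {ptr_A, stride_A, ptr_B, stride_B},              // mainloop operands
    {{1.0f, 0.0f}, ptr_C, stride_C, ptr_D, stride_D} // alpha/beta + epilogue operands
  };
  // Scheduler-specific stream-K options, if any, are left at their defaults here.

  Gemm gemm_op;

  // Stream-K generally needs a device workspace for partial-tile reduction.
  size_t workspace_bytes = Gemm::get_workspace_size(args);
  cutlass::device_memory::allocation<uint8_t> workspace(workspace_bytes);

  cutlass::Status status = gemm_op.can_implement(args);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }
  status = gemm_op.initialize(args, workspace.get());
  if (status != cutlass::Status::kSuccess) {
    return status;
  }
  return gemm_op.run();
}
```

The TestAll<Gemm> harness used in the tests above does essentially this, plus reference checking, over a sweep of problem sizes and the two alpha/beta pairs.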
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "../../common/cutlass_unit_test.h" #include "cutlass/core_io.h" #include "cutlass/numeric_types.h" #include "cutlass/gemm/gemm.h" #include "cutlass/layout/matrix.h" #include "cutlass/tensor_ref.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/gemm/kernel/default_gemv.h" #include "cutlass/gemm/kernel/gemv_batched_strided.h" namespace test { namespace gemm { namespace kernel { template<typename ThreadBlockShape_, typename ThreadShape_, typename ElementAB_, typename ElementAccumulator_, typename ElementCD_, typename LayoutA_, typename LayoutB_, typename LayoutCD_, int THREAD_B = 1, // batch tile size bool DEBUG=false> void batched_gemv_kernel_test(cutlass::gemm::BatchedGemmCoord problem_size, ElementCD_ alpha = ElementCD_(1), ElementCD_ beta = ElementCD_(0), bool perf_test = false, int perf_test_iter = 1) { using ThreadBlockShape = ThreadBlockShape_; using ThreadShape = ThreadShape_; using ElementA = ElementAB_; using LayoutA = LayoutA_; using ElementB = ElementAB_; using LayoutB = LayoutB_; using ElementAccumulator = ElementCD_; using ElementCD = ElementCD_; using LayoutCD = LayoutCD_; using GemvKernel = cutlass::gemm::kernel::DefaultGemv<ThreadBlockShape, ThreadShape, ElementA, LayoutA, ElementB, LayoutB, ElementCD, LayoutCD, ElementAccumulator>; using ThreadBlockGemv = typename GemvKernel::ThreadBlockGemv; using ThreadBlockSwizzle = typename GemvKernel::ThreadBlockSwizzle; if (DEBUG) { problem_size = cutlass::gemm::BatchedGemmCoord( problem_size.m(), 
problem_size.n(), problem_size.k(), 1); } // Create host tensors that will be the backing store for the batches // Note that no device memory is initially allocated cutlass::HostTensor<ElementA, LayoutA> matrix_A({problem_size.m(), problem_size.k()}, false); cutlass::HostTensor<ElementB, LayoutB> matrix_B({problem_size.k(), problem_size.n()}, false); cutlass::HostTensor<ElementCD, LayoutCD> matrix_C_computed({problem_size.m(), problem_size.n()}, false); cutlass::HostTensor<ElementCD, LayoutCD> matrix_C_reference({problem_size.m(), problem_size.n()}, false); // Reserve memory for the batch of tensors matrix_A.reserve(problem_size.m()*problem_size.k()*problem_size.batch()); matrix_B.reserve(problem_size.n()*problem_size.k()*problem_size.batch()); matrix_C_computed.reserve(problem_size.m()*problem_size.n()*problem_size.batch()); matrix_C_reference.reserve(problem_size.m()*problem_size.n()*problem_size.batch(), false); // Fill each tensor batch const int seed = 9876; for (int b = 0; b < problem_size.batch(); b++) { if(DEBUG) { cutlass::reference::host::BlockFillSequential( matrix_A.host_data_ptr_offset(b*matrix_A.capacity()), matrix_A.capacity()); cutlass::reference::host::BlockFillSequential( matrix_B.host_data_ptr_offset(b*matrix_B.capacity()), matrix_B.capacity()); } else { cutlass::reference::host::TensorFillRandomUniform( matrix_A.host_view(b*matrix_A.capacity()), seed + 1660, 8, -8, 0 ); cutlass::reference::host::TensorFillRandomUniform( matrix_B.host_view(b*matrix_B.capacity()), seed + 1880, 8, -8, 0 ); } cutlass::reference::host::TensorFill(matrix_C_computed.host_view(b*matrix_C_computed.capacity())); cutlass::reference::host::TensorFill(matrix_C_reference.host_view(b*matrix_C_reference.capacity())); } matrix_A.sync_device(); matrix_B.sync_device(); matrix_C_computed.sync_device(); ThreadBlockSwizzle swizzle; cutlass::gemm::BatchedGemmCoord tiled_size{ThreadBlockShape::kM, ThreadBlockShape::kN, problem_size.k(), // no split-k DEBUG ? 1 : THREAD_B }; cutlass::gemm::BatchedGemmCoord tiled_shape = swizzle.get_tiled_shape(problem_size, tiled_size); #if 0 printf("tiled_size = %d %d %d %d\n", tiled_size.m(), tiled_size.n(), tiled_size.k(), tiled_size.batch()); printf("tiled_shape = %d %d %d %d\n", tiled_shape.m(), tiled_shape.n(), tiled_shape.k(), tiled_shape.batch()); #endif // No split-k EXPECT_EQ(tiled_size.k(), problem_size.k()); dim3 grid = swizzle.get_grid_shape(tiled_shape); dim3 block(tiled_size.n() / ThreadShape::kN, tiled_size.batch(), tiled_size.k() / problem_size.k()); // Some sanity checks EXPECT_TRUE( block.x*block.y*block.z <= 1024 ); EXPECT_TRUE( block.x <= 1024 ); EXPECT_TRUE( block.y <= 1024 ); EXPECT_TRUE( block.z <= 64 ); #if 0 printf("grid dim = %d, %d, %d\n", grid.x, grid.y, grid.z); printf("block dim = %d, %d, %d\n", block.x, block.y, block.z); #endif cudaError_t result; cudaEvent_t start_event, end_event; for (int iter = 0; iter < (perf_test ?
(perf_test_iter+1) : 1); ++iter) { if (perf_test && iter == 1) { result = cudaEventCreate(&start_event); EXPECT_EQ(result, cudaSuccess); result = cudaEventCreate(&end_event); EXPECT_EQ(result, cudaSuccess); result = cudaEventRecord(start_event); EXPECT_EQ(result, cudaSuccess); } if (beta == ElementCD(0)) { if (alpha == ElementCD(1)) { cutlass::gemm::kernel::GemvBatchedStrided<GemvKernel><<< grid, block >>>( problem_size, matrix_A.device_ref(), matrix_A.capacity(), matrix_B.device_ref(), matrix_B.capacity(), matrix_C_computed.device_ref(), matrix_C_computed.capacity() ); } else { cutlass::gemm::kernel::GemvBatchedStrided<GemvKernel><<< grid, block >>>( problem_size, alpha, matrix_A.device_ref(), matrix_A.capacity(), matrix_B.device_ref(), matrix_B.capacity(), matrix_C_computed.device_ref(), matrix_C_computed.capacity() ); } } else { cutlass::gemm::kernel::GemvBatchedStrided<GemvKernel, ElementCD, false><<< grid, block >>>( problem_size, alpha, beta, matrix_A.device_ref(), matrix_A.capacity(), matrix_B.device_ref(), matrix_B.capacity(), matrix_C_computed.device_ref(), matrix_C_computed.capacity(), matrix_C_computed.device_ref(), matrix_C_computed.capacity() ); } if (iter == 0) { result = cudaGetLastError(); EXPECT_EQ(result, cudaSuccess) << " kernel error: " << cudaGetErrorString(result); } } if (perf_test) { result = cudaEventRecord(end_event); EXPECT_EQ(result, cudaSuccess); } result = cudaDeviceSynchronize(); EXPECT_EQ(result, cudaSuccess) << " kernel error: " << cudaGetErrorString(result); if (perf_test) { float ms; result = cudaEventElapsedTime(&ms, start_event, end_event); EXPECT_EQ(result, cudaSuccess); double flops = (double(problem_size.m()) * double(problem_size.n()) * double(problem_size.k()) * double(problem_size.batch()) * 2); // 2 for MAC double read_bytes = double(problem_size.batch()) * (sizeof(ElementA)*double(problem_size.m())*double(problem_size.k()) + sizeof(ElementB)*double(problem_size.k())*double(problem_size.n())); double write_bytes = double(problem_size.batch()) * (sizeof(ElementCD)*double(problem_size.m())*double(problem_size.n())); double avg_runtime = double(ms) / perf_test_iter; double gflops_per_sec = flops / 1.0e6 / avg_runtime; double read_bandwidth = read_bytes / 1.0e6 / avg_runtime; double write_bandwidth = write_bytes / 1.0e6 / avg_runtime; std::cout << "\n\nProblem size: " << problem_size.m() << " x " << problem_size.n() << " x " << problem_size.k() << " x " << problem_size.batch() << std::endl; std::cout << " GFLOPs: " << gflops_per_sec << std::endl; std::cout << "BW (R/W): " << read_bandwidth << " / " << write_bandwidth << " GB/sec" << std::endl; std::cout << " Runtime: " << avg_runtime << " ms" << std::endl; } else { matrix_C_computed.sync_host(); // Compute the batched gemms for (int b = 0; b < problem_size.batch(); b++) { cutlass::reference::host::Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementCD, LayoutCD, ElementCD, ElementCD> reference_gemm; reference_gemm( problem_size.mnk(), alpha, matrix_A.host_ref(b * matrix_A.capacity()), matrix_B.host_ref(b * matrix_B.capacity()), beta, matrix_C_reference.host_ref(b * matrix_C_computed.capacity())); bool passed = cutlass::reference::host::TensorEquals( matrix_C_computed.host_view(b * matrix_C_computed.capacity()), matrix_C_reference.host_view(b * matrix_C_reference.capacity())); EXPECT_TRUE(passed) //<< "A:\n" << matrix_A.host_view() << "\n" //<< "B:\n" << matrix_B.host_view() << "\n" << "Batch: " << b << "\n" << "Reference:\n" << matrix_C_reference.host_view(b * matrix_C_reference.capacity()) << "\n" << 
"Computed:\n" << matrix_C_computed.host_view(b * matrix_C_computed.capacity()) << "\n"; } } } template<typename ThreadBlockShape_, typename ThreadShape_, typename ElementAB_, typename ElementAccumulator_, typename ElementCD_, typename LayoutA_, typename LayoutB_, typename LayoutCD_, int THREAD_B = 1, // batch tile size bool DEBUG=false> void batched_gemv_kernel_perf_test(cutlass::gemm::BatchedGemmCoord problem_size, ElementCD_ alpha = ElementCD_(1), ElementCD_ beta = ElementCD_(0), int iter = 50) { batched_gemv_kernel_test<ThreadBlockShape_, ThreadShape_, ElementAB_, ElementAccumulator_, ElementCD_, LayoutA_, LayoutB_, LayoutCD_, THREAD_B, DEBUG>(problem_size, alpha, beta, true, iter); } } // namespace threadblock } // namespace kernel } // namespace test
cutlass/test/unit/gemm/kernel/testbed_gemv.h
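The header above only defines the batched GEMV test harness; the actual GTest cases live elsewhere and simply instantiate batched_gemv_kernel_test (or its _perf_test wrapper) with concrete shapes, element types, and layouts. The sketch below shows what such an instantiation might look like. It is illustrative only: the test name, the GemmShape/ThreadShape values, the layout combination, and the problem extents are assumptions chosen to satisfy the divisibility checks in the harness (threadblock N divisible by thread N, problem N divisible by threadblock N, M equal to 1 for a GEMV), not values copied from the real batched GEMV test file.

```cpp
// Hypothetical caller of the harness defined above (see caveats in the text).
TEST(Batched_gemv_sketch, fp32_1x64x256xB16_tb1x64x4_thread1x4x4) {
  using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; // one GEMV row, 64 columns, K-tile of 4
  using ThreadShape      = cutlass::gemm::GemmShape<1, 4, 4>;  // 16 threads cover the 64-wide tile

  // Per batch: C(1x64) = A(1x256) * B(256x64); 16 batches, alpha = 1, beta = 0.
  cutlass::gemm::BatchedGemmCoord problem(1, 64, 256, 16);

  test::gemm::kernel::batched_gemv_kernel_test<
      ThreadBlockShape, ThreadShape,
      float,                              // ElementAB
      float,                              // ElementAccumulator
      float,                              // ElementCD
      cutlass::layout::RowMajor,          // LayoutA (assumed)
      cutlass::layout::ColumnMajor,       // LayoutB (assumed)
      cutlass::layout::RowMajor           // LayoutCD (assumed)
  >(problem, 1.0f, 0.0f);
}
```

The _perf_test wrapper takes the same template arguments and reports GFLOPs and bandwidth instead of checking against the host reference.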
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for thread-level GEMM */ #include "cutlass/cutlass.h" #include "../../common/cutlass_unit_test.h" #include "cutlass/aligned_buffer.h" #include "cutlass/half.h" #include "cutlass/gemm/warp/default_mma_complex_tensor_op.h" #include "cutlass/core_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/gemm.h" #include "testbed.h" #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) //////////////////////////////////////////////////////////////////////////////////////////////////// // complex<double> * complex<double> => complex<double> // Input data type: complex<double> // Math instruction: mma.sync.aligned.m8n8k4.f64.f64.f64.f64 // Output data type: complex<double> /////////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM80_warp_gemm_complex_tensor_op_f64, 8x8x4_8x8x4_nt) { using Shape = cutlass::gemm::GemmShape<8, 8, 4>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; using Element = cutlass::complex<double>; using ElementC = cutlass::complex<double>; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor >::Type; test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<8, 8, 4> >().run(); } TEST(SM80_warp_gemm_complex_tensor_op_f64, 16x16x4_8x8x4_nt) { using Shape = cutlass::gemm::GemmShape<16, 16, 4>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; using Element = cutlass::complex<double>; using ElementC = cutlass::complex<double>; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor >::Type; test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<16, 16, 4> >().run(); } TEST(SM80_warp_gemm_complex_tensor_op_f64, 16x32x4_8x8x4_nt) { using Shape = cutlass::gemm::GemmShape<16, 32, 4>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; using Element = cutlass::complex<double>; using ElementC = cutlass::complex<double>; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor >::Type; test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<16, 32, 4> >().run(); } TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x16x4_8x8x4_nt) { using Shape = cutlass::gemm::GemmShape<32, 16, 4>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; using Element = cutlass::complex<double>; using ElementC = cutlass::complex<double>; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, 
Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor >::Type; test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<32, 16, 4> >().run(); } TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x32x4_8x8x4_nt) { using Shape = cutlass::gemm::GemmShape<32, 32, 4>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; using Element = cutlass::complex<double>; using ElementC = cutlass::complex<double>; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor >::Type; test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 4> >().run(); } TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x32x4_8x8x4_nh) { using Shape = cutlass::gemm::GemmShape<32, 32, 4>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; using Element = cutlass::complex<double>; using ElementC = cutlass::complex<double>; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, cutlass::ComplexTransform::kConjugate >::Type; test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 4> >().run(); } TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x32x4_8x8x4_ct) { using Shape = cutlass::gemm::GemmShape<32, 32, 4>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; using Element = cutlass::complex<double>; using ElementC = cutlass::complex<double>; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor, cutlass::ComplexTransform::kConjugate, cutlass::ComplexTransform::kNone >::Type; test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 4> >().run(); } TEST(SM80_warp_gemm_complex_tensor_op_f64, 8x8x4_8x8x4_tn) { using Shape = cutlass::gemm::GemmShape<8, 8, 4>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; using Element = cutlass::complex<double>; using ElementC = cutlass::complex<double>; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor >::Type; test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<8, 8, 4> >().run(); } TEST(SM80_warp_gemm_complex_tensor_op_f64, 16x16x4_8x8x4_tn) { using Shape = cutlass::gemm::GemmShape<16, 16, 4>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; using Element = cutlass::complex<double>; using ElementC = cutlass::complex<double>; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4; using MmaTensorOp = typename 
cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor >::Type; test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<16, 16, 4> >().run(); } /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// // complex<float> * complex<float> => complex<float> // Input data type: complex<float> // Math instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 // Output data type: complex<float> // Shared memory layout: Congrous //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM80_warp_gemm_complex_tensor_op_f32, 16x16x8_16x8x8_nt) { using Shape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::complex<float>; using ElementC = cutlass::complex<float>; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor >::Type; test::gemm::warp::TransformedTestbedComplex< MmaTensorOp, cutlass::gemm::GemmShape<16, 16, 8> >() .run(); } TEST(SM80_warp_gemm_complex_tensor_op_f32, 16x16x16_16x8x8_nt) { using Shape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::complex<float>; using ElementC = cutlass::complex<float>; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor >::Type; test::gemm::warp::TransformedTestbedComplex< MmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >() .run(); } TEST(SM80_warp_gemm_complex_tensor_op_f32, 16x32x8_16x8x8_nt) { using Shape = cutlass::gemm::GemmShape<16, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::complex<float>; using ElementC = cutlass::complex<float>; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor >::Type; test::gemm::warp::TransformedTestbedComplex< MmaTensorOp, cutlass::gemm::GemmShape<16, 32, 8> >() .run(); } TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x16x8_16x16x8_nt) { using Shape = cutlass::gemm::GemmShape<32, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::complex<float>; using ElementC = cutlass::complex<float>; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor >::Type; test::gemm::warp::TransformedTestbedComplex< MmaTensorOp, 
cutlass::gemm::GemmShape<32, 16, 8> >() .run(); } TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x32x8_16x8x8_nt) { using Shape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::complex<float>; using ElementC = cutlass::complex<float>; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor >::Type; test::gemm::warp::TransformedTestbedComplex< MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >() .run(); } TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x32x8_16x8x8_nh) { using Shape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::complex<float>; using ElementC = cutlass::complex<float>; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor, cutlass::ComplexTransform::kNone, cutlass::ComplexTransform::kConjugate >::Type; test::gemm::warp::TransformedTestbedComplex< MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >() .run(); } TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x32x8_16x8x8_ct) { using Shape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::complex<float>; using ElementC = cutlass::complex<float>; using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous64b; using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous64b; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor, cutlass::ComplexTransform::kConjugate, cutlass::ComplexTransform::kNone >::Type; test::gemm::warp::TransformedTestbedComplex< MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >() .run(); } /////////////////////////////////////////////////////////////////////////////////////////////////// // complex<float> * complex<float> => complex<float> // Input data type: complex<float> // Math instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 // Output data type: complex<float> // Shared memory layout: Crosswise //////////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM80_warp_gemm_complex_tensor_op_f32, 16x16x8_16x8x8_tn) { using Shape = cutlass::gemm::GemmShape<16, 16, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::complex<float>; using ElementC = cutlass::complex<float>; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor >::Type; test::gemm::warp::TransformedTestbedComplex< MmaTensorOp, cutlass::gemm::GemmShape<16, 16, 8> >() .run(); } // TEST FAILS crosswise complex<float> TN mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32 test fails for k = 2*8 = 16 
TEST(SM80_warp_gemm_complex_tensor_op_f32, 16x16x16_16x8x8_tn) { using Shape = cutlass::gemm::GemmShape<16, 16, 16>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::complex<float>; using ElementC = cutlass::complex<float>; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor >::Type; test::gemm::warp::TransformedTestbedComplex< MmaTensorOp, cutlass::gemm::GemmShape<16, 16, 16> >() .run(); } TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x32x8_16x8x8_tn) { using Shape = cutlass::gemm::GemmShape<32, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::complex<float>; using ElementC = cutlass::complex<float>; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor >::Type; test::gemm::warp::TransformedTestbedComplex< MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >() .run(); } TEST(SM80_warp_gemm_complex_tensor_op_f32, 32x64x8_16x8x8_tn) { using Shape = cutlass::gemm::GemmShape<32, 64, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::complex<float>; using ElementC = cutlass::complex<float>; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor >::Type; test::gemm::warp::TransformedTestbedComplex< MmaTensorOp, cutlass::gemm::GemmShape<32, 64, 8> >() .run(); } TEST(SM80_warp_gemm_complex_tensor_op_f32, 64x32x8_16x8x8_tn) { using Shape = cutlass::gemm::GemmShape<64, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; using Element = cutlass::complex<float>; using ElementC = cutlass::complex<float>; using LayoutA = cutlass::layout::RowMajorTensorOpMultiplicand64bCrosswise; using LayoutB = cutlass::layout::ColumnMajorTensorOpMultiplicand64bCrosswise; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor >::Type; test::gemm::warp::TransformedTestbedComplex< MmaTensorOp, cutlass::gemm::GemmShape<64, 32, 8> >() .run(); } //////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x32x8_8x8x4_tn) { using Shape = cutlass::gemm::GemmShape<32, 32, 4>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; using Element = cutlass::complex<double>; using ElementC = cutlass::complex<double>; using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::ColumnMajor; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor >::Type; test::gemm::warp::TransformedTestbedComplex< MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >() .run(); } 
//////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x32x8_8x8x4_nt) { using Shape = cutlass::gemm::GemmShape<32, 32, 4>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; using Element = cutlass::complex<double>; using ElementC = cutlass::complex<double>; using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::RowMajor; using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp< Shape, InstructionShape, Element, LayoutA, Element, LayoutB, ElementC, cutlass::layout::RowMajor >::Type; test::gemm::warp::TransformedTestbedComplex< MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 8> >() .run(); } //////////////////////////////////////////////////////////////////////////////////////////////// #endif // #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED)
cutlass/test/unit/gemm/warp/gemm_complex_sm80.cu
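A note on the naming convention in the file above: the suffix encodes the operand forms for A and B, with n and t marking column-major and row-major operands without conjugation, and c and h marking the conjugated counterparts; the conjugation is selected by the third and fourth ComplexTransform template arguments of DefaultMmaComplexTensorOp. The file covers nt, nh, ct, and tn combinations. As a hedged illustration of how a both-conjugated variant would be added by the same pattern (this case is not present in the file above), a ch-style test looks like this:

```cpp
// Illustrative only: both operands conjugated, following the same
// 32x32x4 congruous-layout pattern as the nh and ct cases above.
TEST(SM80_warp_gemm_complex_tensor_op_f64, 32x32x4_8x8x4_ch) {
  using Shape = cutlass::gemm::GemmShape<32, 32, 4>;
  using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>;
  using Element = cutlass::complex<double>;
  using ElementC = cutlass::complex<double>;
  using LayoutA = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b;
  using LayoutB = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b;

  using MmaTensorOp = typename cutlass::gemm::warp::DefaultMmaComplexTensorOp<
    Shape, InstructionShape,
    Element, LayoutA,
    Element, LayoutB,
    ElementC, cutlass::layout::RowMajor,
    cutlass::ComplexTransform::kConjugate,   // conjugate A
    cutlass::ComplexTransform::kConjugate    // conjugate B
  >::Type;

  test::gemm::warp::TestbedComplex<MmaTensorOp, cutlass::gemm::GemmShape<32, 32, 4> >().run();
}
```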
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief Unit tests for thread-level Reduction */ #pragma once #include "cutlass/reduction/thread/reduce.h" #include "cutlass/layout/vector.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/tensor_compare.h" namespace test { namespace reduction { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the reduction template < /// Data type of elements typename Element, /// Number of elements int N > struct Testbed_reduce_host { /// Thread-level reduction operator using Reduce = cutlass::reduction::thread::Reduce< cutlass::plus<Element>, cutlass::Array<Element, N> >; // // Data members // cutlass::Array<Element, N> tensor_in; cutlass::Array<Element, 1> reduced_tensor_computed; cutlass::Array<Element, 1> reduced_tensor_reference; // // Methods // /// Allocates workspace in device memory Testbed_reduce_host() { tensor_in.clear(); reduced_tensor_computed.clear(); reduced_tensor_reference.clear(); } /// Runs the test bool run() { // // initialize memory // for(int i = 0; i < N; i++) tensor_in.at(i) = Element(i); Reduce reduce; cutlass::Array<Element, 1> *out_ptr = &reduced_tensor_computed; out_ptr[0] = reduce(tensor_in); // // Reference implementation // Element e(0); for (int i = 0; i < N; i++) e = e + Element(i); reduced_tensor_reference.at(0) = e; // // Verify equivalence // // compare bool passed = reduced_tensor_reference[0] == reduced_tensor_computed[0]; EXPECT_TRUE(passed) << "Expected = " << float(reduced_tensor_reference.at(0)) << "\n\n" << "Actual = " << float(reduced_tensor_computed.at(0)) << "\n\n" << std::endl; return passed; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Thread-level reduction kernel template <typename Element, int N> __global__ void kernel_reduce(Element const *array_in, Element *result) { /// Thread-level reduction operator using Reduce = cutlass::reduction::thread::Reduce< cutlass::plus<Element>, cutlass::Array<Element, N> >; Reduce reduce; auto ptr_in = reinterpret_cast<cutlass::Array<Element , N> const *>(array_in); auto result_ptr = reinterpret_cast<cutlass::Array<Element , 1> *>(result); auto in = *ptr_in; result_ptr[0] = reduce(in); } /// Structure to compute the reduction template < /// Data type of elements typename Element, /// Number of elements int N > struct Testbed_reduce_device { using Layout = cutlass::layout::PackedVectorLayout; // // Data members // cutlass::HostTensor<Element, Layout> tensor_in; cutlass::HostTensor<Element, Layout> reduced_tensor_computed; cutlass::HostTensor<Element, Layout> reduced_tensor_reference; // // Methods // /// Allocates workspace in device memory Testbed_reduce_device() { tensor_in.reset(cutlass::make_Coord(N), true); reduced_tensor_computed.reset(cutlass::make_Coord(1), true); reduced_tensor_reference.reset(cutlass::make_Coord(1), true); } /// Runs the test bool run() { // // initialize memory // cutlass::reference::host::TensorFill( tensor_in.host_view(), Element(1) ); cutlass::reference::host::TensorFill( reduced_tensor_computed.host_view(), Element(0) ); cutlass::reference::host::TensorFill( reduced_tensor_reference.host_view(), Element(N) ); tensor_in.sync_device(); reduced_tensor_computed.sync_device(); reduced_tensor_reference.sync_device(); /// call the kernel kernel_reduce<Element, N><<< 
dim3(1, 1), dim3(1, 1, 1) >>> ( tensor_in.device_data(), reduced_tensor_computed.device_data() ); // verify no errors cudaError_t result = cudaDeviceSynchronize(); EXPECT_EQ(result, cudaSuccess) << "CUDA ERROR: " << cudaGetErrorString(result); if (result != cudaSuccess) { return false; } // Copy back results reduced_tensor_computed.sync_host(); // Verify equivalence bool passed = cutlass::reference::host::TensorEquals( reduced_tensor_computed.host_view(), reduced_tensor_reference.host_view() ); EXPECT_TRUE(passed) << "Expected = " << reduced_tensor_reference.host_view() << "\n\n" << "Actual = " << reduced_tensor_computed.host_view() << "\n\n" << std::endl; return passed; } }; } // namespace thread } // namespace reduction } // namespace test
cutlass/test/unit/reduction/thread/testbed.h
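The header above supplies only the two testbeds; the corresponding unit tests just instantiate them with an element type and a vector length and call run(), which performs its own EXPECT_TRUE checks. A hedged sketch of such callers follows; the test suite and case names are illustrative, not the names used in the real thread-level reduction tests.

```cpp
// Illustrative callers of the testbeds defined above.
TEST(Reduce_thread_sketch, host_f32_x8) {
  // Host-side path: sums 0..7 with cutlass::plus<float> and checks against a loop.
  test::reduction::thread::Testbed_reduce_host<float, 8>().run();
}

TEST(Reduce_thread_sketch, device_f16_x32) {
  // Device-side path: fills 32 ones, reduces in a single-thread kernel, expects 32.
  test::reduction::thread::Testbed_reduce_device<cutlass::half_t, 32>().run();
}
```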
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief BLAS-like handle used to launch operations on the CUDA device. 
*/ #pragma once #include <memory> #include "cutlass/library/library.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace library { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Handle object class Handle { private: /// Host workspace static int const kHostWorkspaceSize = (4 << 10); /// Provider of operations Provider provider_; /// CUDA device properties cudaDeviceProp device_; /// CUDA stream cudaStream_t stream_; /// Device workspace void *workspace_; /// Size of device workspace in bytes size_t workspace_size_; /// Indicates whether scalars are host or device pointers ScalarPointerMode scalar_pointer_mode_; /// Pointer to the most recently executed operation Operation const *last_operation_; public: /// Constructor Handle(cudaStream_t stream = nullptr, size_t workspace_size = (4<<20)); /// Destructor ~Handle(); /// Move constructor Handle(Handle && handle); /// Move assignment operator Handle &operator=(Handle && handle); // // Persistent state accessors // /// Returns compute capability of the selected device int compute_capability() const; /// Sets the current CUDA stream void set_stream(cudaStream_t stream); /// Gets the current CUDA stream cudaStream_t get_stream() const; /// Gets the current provider Provider get_provider() const; /// Sets the provider of operations void set_provider(Provider provider); /// Gets the device workspace size size_t get_workspace_size() const; /// Gets a pointer to the device workspace allocation in Global Memory void *get_workspace() const; /// Sets the size of device workspace, invalidating calls to get_device_workspace() void set_workspace_size(size_t bytes); /// Gets the scalar pointer mode ScalarPointerMode get_scalar_pointer_mode() const; /// Sets the scalar pointer mode void set_scalar_pointer_mode(ScalarPointerMode mode); /// Gets the most recently executed operation Operation const *get_last_operation() const; // // Computations // /// Executes a GEMM computation: D <= alpha * A*B + beta * C Status gemm( int M, /// GEMM M dimension int N, /// GEMM N dimension int K, /// GEMM K dimension NumericTypeID element_compute, /// Data type of internal accumulation NumericTypeID element_scalar, /// Data type of alpha/beta scalars void const *alpha, /// Pointer to alpha scalar NumericTypeID element_A, /// Data type of A matrix elements LayoutTypeID layout_A, /// Layout of A matrix ComplexTransform transform_A, /// Complex transformation applied to A matrix - ignored for real-valued matrices void const * ptr_A, /// Pointer to A matrix in Global Memory int64_t lda, /// Leading dimension of A matrix NumericTypeID element_B, /// Data type of B matrix elements LayoutTypeID layout_B, /// Layout of B matrix ComplexTransform transform_B, /// Complex transformation applied to B matrix - ignored for real-valued matrices void const * ptr_B, /// Pointer to B matrix in Global Memory int64_t ldb, /// Leading dimension of B matrix void const * beta, /// Pointer to beta scalar NumericTypeID element_C, /// Data type of C and D matrices void const * ptr_C, /// Pointer to C matrix int64_t ldc, /// Leading dimension of C matrix void * ptr_D, /// Pointer to D matrix int64_t ldd /// Leading dimension of D matrix ); /// Executes a GEMM computation: D <= alpha * A*B + beta * C. // // Supports batched-strided, batched array or split-K serial or split-K parallel. 
  //
  Status gemm_universal(

    GemmUniversalMode mode,                   /// indicates the mode in which the kUniversal GEMM is launched

    int M,                                    /// GEMM M dimension
    int N,                                    /// GEMM N dimension
    int K,                                    /// GEMM K dimension

    NumericTypeID element_compute,            /// Data type of internal accumulation
    NumericTypeID element_scalar,             /// Data type of alpha/beta scalars

    void const *alpha,                        /// Pointer to alpha scalar

    NumericTypeID element_A,                  /// Data type of A matrix elements
    LayoutTypeID layout_A,                    /// Layout of A matrix
    ComplexTransform transform_A,             /// Complex transformation applied to A matrix - ignored for real-valued matrices
    void const * ptr_A,                       /// Pointer to A matrix in Global Memory
    int64_t lda,                              /// Leading dimension of A matrix

    NumericTypeID element_B,                  /// Data type of B matrix elements
    LayoutTypeID layout_B,                    /// Layout of B matrix
    ComplexTransform transform_B,             /// Complex transformation applied to B matrix - ignored for real-valued matrices
    void const * ptr_B,                       /// Pointer to B matrix in Global Memory
    int64_t ldb,                              /// Leading dimension of B matrix

    void const * beta,                        /// Pointer to beta scalar

    NumericTypeID element_C,                  /// Data type of C matrix
    LayoutTypeID layout_C,                    /// Layout of C matrix
    void const * ptr_C,                       /// Pointer to C matrix
    int64_t ldc,                              /// Leading dimension of C matrix

    NumericTypeID element_D,                  /// Data type of D matrix
    LayoutTypeID layout_D,                    /// Layout of D matrix
    void * ptr_D,                             /// Pointer to D matrix
    int64_t ldd,                              /// Leading dimension of D matrix

    int batch_count = 1,                      /// Batch count or number of split-K slices
    int64_t batch_stride_A = 0,               /// Batch stride of A operand
    int64_t batch_stride_B = 0,               /// Batch stride of B operand
    int64_t batch_stride_C = 0,               /// Batch stride of C operand
    int64_t batch_stride_D = 0                /// Batch stride of D operand
  );

  /// Planar complex GEMM
  ///
  /// Note, all data types are the real-valued base types used by the planar-complex GEMM kernel.
/// Status gemm_planar_complex( int M, /// GEMM M dimension int N, /// GEMM N dimension int K, /// GEMM K dimension NumericTypeID element_compute, /// Data type of internal accumulation NumericTypeID element_scalar, /// Data type of alpha/beta scalars void const *alpha, /// Pointer to alpha scalar NumericTypeID element_A, /// Data type of A matrix elements LayoutTypeID layout_A, /// Layout of A matrix ComplexTransform transform_A, /// Complex transformation applied to A matrix void const * ptr_A_real, /// Pointer to real part of A matrix void const * ptr_A_imag, /// Pointer to imaginary part of A matrix int64_t lda_real, /// Leading dimension of real part of A matrix int64_t lda_imag, /// Leading dimension of imaginary part of A matrix NumericTypeID element_B, /// Data type of B matrix elements LayoutTypeID layout_B, /// Layout of B matrix ComplexTransform transform_B, /// Complex transformation applied to B matrix void const * ptr_B_real, /// Pointer to real part of B matrix void const * ptr_B_imag, /// Pointer to imaginary part of B matrix int64_t ldb_real, /// Leading dimension of real part of B matrix int64_t ldb_imag, /// Leading dimension of imaginary part of B matrix void const * beta, /// Pointer to beta scalar NumericTypeID element_C, /// Data type of C and D matrix void const * ptr_C_real, /// Pointer to real part of C matrix void const * ptr_C_imag, /// Pointer to imaginary part of C matrix int64_t ldc_real, /// Leading dimension of real part of C matrix int64_t ldc_imag, /// Leading dimension of imaginary part of C matrix void * ptr_D_real, /// Pointer to real part of D matrix void * ptr_D_imag, /// Pointer to imaginary part of D matrix int64_t ldd_real, /// Leading dimension of real part of D matrix int64_t ldd_imag, /// Leading dimension of imaginary part of D matrix int batch_count = 1, /// Number of batched GEMMs to execute int64_t batch_stride_A_real = 0, int64_t batch_stride_A_imag = 0, int64_t batch_stride_B_real = 0, int64_t batch_stride_B_imag = 0, int64_t batch_stride_C_real = 0, int64_t batch_stride_C_imag = 0, int64_t batch_stride_D_real = 0, int64_t batch_stride_D_imag = 0 ); /// Planar complex GEMM loading pointers from arrays in global memory Status gemm_planar_complex_array( int expected_M, /// Expected GEMM M dimension (used for sizing CUDA grid) int expected_N, /// Expected GEMM N dimension (used for sizing CUDA grid) int expected_K, /// Expected GEMM K dimension int batch_count, /// Number of independent GEMM computations to execute int const *M, /// Array containing the GEMM M dimension for each batch index int const *N, /// Array containing the GEMM N dimension for each batch index int const *K, /// Array containing the GEMM K dimension for each batch index NumericTypeID element_compute, /// Data type of internal accumulation NumericTypeID element_scalar, /// Data type of alpha/beta scalars void const *alpha, /// Pointer to alpha scalar NumericTypeID element_A, /// Data type of A matrix elements LayoutTypeID layout_A, /// Layout of A matrix ComplexTransform transform_A, /// Complex transformation applied to A matrix void const * const * ptr_A_real, /// Pointer to array containing pointers to real part of A matrices void const * const * ptr_A_imag, /// Pointer to array containing pointers to imaginary part of A matrices int64_t lda_real, /// Leading dimension of real part of A matrix int64_t lda_imag, /// Leading dimension of imaginary part of A matrix NumericTypeID element_B, /// Data type of B matrix elements LayoutTypeID layout_B, /// Layout of B matrix 
ComplexTransform transform_B, /// Complex transformation applied to B matrix void const * const * ptr_B_real, /// Pointer to array containing pointers to real part of B matrices void const * const * ptr_B_imag, /// Pointer to array containing pointers to imaginary part of B matrices int64_t ldb_real, /// Leading dimension of real part of B matrix int64_t ldb_imag, /// Leading dimension of imaginary part of B matrix void const * beta, /// Pointer to beta scalar NumericTypeID element_C, /// Data type of C and D matrix void const * const * ptr_C_real, /// Pointer to array containing pointers to real part of C matrices void const * const * ptr_C_imag, /// Pointer to array containing pointers to imaginary part of C matrices int64_t ldc_real, /// Leading dimension of real part of C matrix int64_t ldc_imag, /// Leading dimension of imaginary part of C matrix void * const * ptr_D_real, /// Pointer to array containing pointers to real part of D matrices void * const * ptr_D_imag, /// Pointer to array containing pointers to imaginary part of D matrices int64_t ldd_real, /// Leading dimension of real part of D matrix int64_t ldd_imag /// Leading dimension of imaginary part of D matrix ); }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Unique pointer storing the handle using HandlePtr = std::unique_ptr<Handle>; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Finds conv2d operation instances with Conv2d::ElementC = Reduction::ElementWorkspace Operation const* find_conv_operation_for_parallel_reduction(Operation const *operation); ///////////////////////////////////////////////////////////////////////////////////////////////// /// Finds gemm operation instances with ElementC = Reduction::ElementWorkspace Operation const* find_gemm_operation_for_parallel_reduction(Operation const *operation); ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace library } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
// File: cutlass/tools/library/include/cutlass/library/handle.h
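// Usage sketch for the Handle API above: a single-precision, column-major GEMM
// (D = alpha * A*B + beta * C) driven through Handle::gemm(). This is a minimal,
// hedged example, not library documentation: it assumes the CUTLASS library of
// compiled kernels is linked in, that the device pointers are allocated and
// populated by the caller, and that the enumerator spellings below match those
// declared in cutlass/library/library.h.

#include "cutlass/library/handle.h"

cutlass::Status run_sgemm(
  int M, int N, int K,
  float const *ptr_A,    // M-by-K, column-major, device memory
  float const *ptr_B,    // K-by-N, column-major, device memory
  float const *ptr_C,    // M-by-N, column-major, device memory
  float       *ptr_D) {  // M-by-N, column-major, device memory

  namespace lib = cutlass::library;

  lib::Handle handle;    // null stream by default; allocates a device workspace
  handle.set_scalar_pointer_mode(lib::ScalarPointerMode::kHost);  // alpha/beta live on the host

  float alpha = 1.0f;
  float beta  = 0.0f;

  return handle.gemm(
    M, N, K,
    lib::NumericTypeID::kF32,                // element_compute
    lib::NumericTypeID::kF32,                // element_scalar
    &alpha,
    lib::NumericTypeID::kF32, lib::LayoutTypeID::kColumnMajor,
    cutlass::ComplexTransform::kNone, ptr_A, M,
    lib::NumericTypeID::kF32, lib::LayoutTypeID::kColumnMajor,
    cutlass::ComplexTransform::kNone, ptr_B, K,
    &beta,
    lib::NumericTypeID::kF32, ptr_C, M,      // element_C, ptr_C, ldc
    ptr_D, M);                               // ptr_D, ldd
}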
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Defines operations for all Rank 2K operation kinds (Syr2k, Her2k) in CUTLASS Library. 
*/ #pragma once #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/rank_2k.h" #include "cutlass/gemm/kernel/default_rank_2k_universal.h" #include "cutlass/library/library.h" #include "library_internal.h" #include "cutlass/core_io.h" /////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace library { /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Operator_> class Rank2KOperationBase : public Operation { public: using Operator = Operator_; using ElementA = typename Operator::ElementA; using LayoutA = typename Operator::LayoutA; using ElementB = typename Operator::ElementB; using LayoutB = typename Operator::LayoutB; using ElementC = typename Operator::ElementC; using LayoutC = typename Operator::LayoutC; using ElementAccumulator = typename Operator::ElementAccumulator; using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; static BlasMode const kBlasMode = Operator::kBlasMode; static int const kUpdateRank = Operator::kUpdateRank; static FillMode const kFillModeC = Operator::kFillModeC; using OperatorArguments = typename Operator::Arguments; protected: /// RankKDescription description_; public: /// Constructor Rank2KOperationBase(char const *name = "unknown_rank_k") { description_.name = name; description_.provider = Provider::kCUTLASS; description_.rank_k_kind = RankKKind::kUniversal; description_.fill_mode = kFillModeC; description_.blas_mode = kBlasMode; description_.num_ranks = kUpdateRank; description_.kind = OperationKind::kRank2K; description_.tile_description.threadblock_shape = make_Coord( Operator::ThreadblockShape::kM, Operator::ThreadblockShape::kN, Operator::ThreadblockShape::kK); description_.tile_description.threadblock_stages = Operator::kStages; description_.tile_description.warp_count = make_Coord( Operator::Rank2Kkernel::WarpCount::kM, Operator::Rank2Kkernel::WarpCount::kN, Operator::Rank2Kkernel::WarpCount::kK); description_.tile_description.math_instruction.instruction_shape = make_Coord( Operator::InstructionShape::kM, Operator::InstructionShape::kN, Operator::InstructionShape::kK); description_.tile_description.math_instruction.element_accumulator = NumericTypeMap<ElementAccumulator>::kId; description_.tile_description.math_instruction.opcode_class = OpcodeClassMap<typename Operator::OperatorClass>::kId; description_.tile_description.math_instruction.math_operation = MathOperationMap<typename Operator::Operator>::kId; description_.tile_description.minimum_compute_capability = ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMin; description_.tile_description.maximum_compute_capability = ArchMap<typename Operator::ArchTag, typename Operator::OperatorClass>::kMax; description_.A = make_TensorDescription<ElementA, LayoutA>(Operator::kAlignmentA); description_.B = make_TensorDescription<ElementB, LayoutB>(Operator::kAlignmentB); description_.C = make_TensorDescription<ElementC, LayoutC>(Operator::kAlignmentC); description_.element_epilogue = NumericTypeMap<ElementCompute>::kId; description_.split_k_mode = SplitKMode::kNone; description_.transform_A = ComplexTransformMap<Operator::kTransformA>::kId; description_.transform_B = ComplexTransformMap<Operator::kTransformB>::kId; } /// Returns the description of the SYRK operation virtual OperationDescription const & description() const { return description_; } }; 
/////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Operator_> class Rank2KOperation : public Rank2KOperationBase<Operator_> { public: using Operator = Operator_; using ElementA = typename Operator::ElementA; using LayoutA = typename Operator::LayoutA; using ElementB = typename Operator::ElementB; using LayoutB = typename Operator::LayoutB; using ElementC = typename Operator::ElementC; using LayoutC = typename Operator::LayoutC; using ElementAccumulator = typename Operator::ElementAccumulator; using ElementCompute = typename Operator::EpilogueOutputOp::ElementCompute; static BlasMode const kBlasMode = Operator::kBlasMode; static int const kUpdateRank = Operator::kUpdateRank; static FillMode const kFillModeC = Operator::kFillModeC; using OperatorArguments = typename Operator::Arguments; public: /// Constructor Rank2KOperation(char const *name = "unknown_rank_2k"): Rank2KOperationBase<Operator_>(name) { this->description_.rank_k_kind = RankKKind::kUniversal; } protected: /// Constructs the arguments structure given the configuration and arguments static Status construct_arguments_( OperatorArguments &operator_args, RankKConfiguration const *configuration) { //operator_args.mode = configuration->mode; operator_args.problem_size = configuration->problem_size; operator_args.batch_count = configuration->batch_count; operator_args.lda = int(configuration->lda); operator_args.ldb = int(configuration->ldb); operator_args.ldc = int(configuration->ldc); operator_args.ldd = int(configuration->ldd); return Status::kSuccess; } /// Constructs the arguments structure given the configuration and arguments static Status update_arguments_( OperatorArguments &operator_args, RankKArguments const *arguments) { if (arguments->pointer_mode == ScalarPointerMode::kHost) { typename Operator::EpilogueOutputOp::Params params( *static_cast<ElementCompute const *>(arguments->alpha), *static_cast<ElementCompute const *>(arguments->beta) ); operator_args.epilogue = params; } else if (arguments->pointer_mode == ScalarPointerMode::kDevice){ typename Operator::EpilogueOutputOp::Params params( static_cast<ElementCompute const *>(arguments->alpha), static_cast<ElementCompute const *>(arguments->beta) ); operator_args.epilogue = params; } else { return Status::kErrorInvalidProblem; } // update arguments operator_args.ptr_A = arguments->A; operator_args.ptr_B = arguments->B; operator_args.ptr_C = arguments->C; operator_args.ptr_D = arguments->D; operator_args.batch_stride_A = arguments->batch_stride_A; operator_args.batch_stride_B = arguments->batch_stride_B; operator_args.batch_stride_C = arguments->batch_stride_C; operator_args.batch_stride_D = arguments->batch_stride_D; return Status::kSuccess; } public: /// Returns success if the operation can proceed virtual Status can_implement( void const *configuration_ptr, void const *arguments_ptr) const { RankKConfiguration const *configuration = static_cast<RankKConfiguration const *>(configuration_ptr); RankKArguments const *arguments = static_cast<RankKArguments const *>(arguments_ptr); OperatorArguments args; Status status = construct_arguments_(args, configuration); if (status != Status::kSuccess) { return status; } status = update_arguments_(args, arguments); if (status != Status::kSuccess) { return status; } return Operator::can_implement(args); } /// Gets the host-side workspace virtual uint64_t get_host_workspace_size( void const *configuration) const { return sizeof(Operator); } /// Gets the device-side 
workspace virtual uint64_t get_device_workspace_size( void const *configuration_ptr, void const *arguments_ptr = nullptr) const { OperatorArguments args; Status status = construct_arguments_( args, static_cast<RankKConfiguration const *>(configuration_ptr)); if (status != Status::kSuccess) { return 0; } uint64_t size = Operator::get_workspace_size(args); return size; } /// Initializes the workspace virtual Status initialize( void const *configuration_ptr, void *host_workspace, void *device_workspace, cudaStream_t stream = nullptr) const { OperatorArguments args; Status status = construct_arguments_( args, static_cast<RankKConfiguration const *>(configuration_ptr)); if (status != Status::kSuccess) { return status; } Operator *op = new (host_workspace) Operator; //std::cout << "initialize() library::Rank2KOperation" << std::endl; //print_operator_args(args); status = op->initialize(args, device_workspace, stream); return status; } /// Runs the kernel virtual Status run( void const *arguments_ptr, void *host_workspace, void *device_workspace = nullptr, cudaStream_t stream = nullptr) const { OperatorArguments args; Status status = update_arguments_( args, static_cast<RankKArguments const *>(arguments_ptr)); if (status != Status::kSuccess) { return status; } Operator *op = static_cast<Operator *>(host_workspace); status = op->update(args, device_workspace); if (status != Status::kSuccess) { return status; } //std::cout << "run() library::Rank2KOperation" << std::endl; //print_operator_args(args); status = op->run(stream); return status; } /// Call print_operator_args from the Conv2dOperation::initialize() // to dump arguments passed on to cutlass operator for debugging void print_operator_args(OperatorArguments &operator_args) const { std::cout << "Rank2KOperation::OperatorArguments" << std::endl << " problem_size:" << std::endl << operator_args.problem_size << std::endl << " epilogue (alpha, beta): " << operator_args.epilogue.alpha << ", " << operator_args.epilogue.beta << std::endl << " ref_A (ptr, {stride}): " << operator_args.ptr_A << ", {" << operator_args.lda << "}" << std::endl << " ref_B (ptr, {stride}): " << operator_args.ptr_B << ", {" << operator_args.ldb << "}" << std::endl << " ref_C (ptr, {stride}): " << operator_args.ptr_C << ", {" << operator_args.ldc << "}" << std::endl << " ref_D (ptr, {stride}): " << operator_args.ptr_D << ", {" << operator_args.ldd << "}" << std::endl; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace library } // namespace cutlass ///////////////////////////////////////////////////////////////////////////////////////////////////
// File: cutlass/tools/library/src/rank_2k_operation.h
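// Sketch of the two-phase protocol every library operation above follows,
// including Rank2KOperation: can_implement() checks the problem, initialize()
// constructs the device operator inside a caller-provided host workspace, and
// run() launches it. The 'op', 'config', 'args', and 'device_workspace'
// arguments are assumed to be obtained elsewhere (e.g., from the library
// manifest and the caller's allocations).

#include <vector>
#include <cuda_runtime.h>
#include "cutlass/library/library.h"

cutlass::Status run_rank_2k(
  cutlass::library::Operation const *op,
  cutlass::library::RankKConfiguration const &config,
  cutlass::library::RankKArguments const &args,
  void *device_workspace,
  cudaStream_t stream) {

  cutlass::Status status = op->can_implement(&config, &args);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  // Host workspace holds the Operator object placement-new'ed by initialize().
  std::vector<char> host_workspace(op->get_host_workspace_size(&config));

  status = op->initialize(&config, host_workspace.data(), device_workspace, stream);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  return op->run(&args, host_workspace.data(), device_workspace, stream);
}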
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Helper functions for mapping CUTLASS concepts to cuDNN. */ #pragma once #if CUTLASS_ENABLE_CUDNN #include <cuda_runtime.h> #include <cudnn.h> #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/util/device_memory.h" #include "cutlass/library/library.h" #include "enumerated_types.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Converts a cuDNN status to cutlass::Status Status get_cutlass_status(cudnnStatus_t cudnn_status); /// Converts a cuDNN status to cutlass::profiler::Disposition Disposition get_cutlass_disposition(cudnnStatus_t cudnn_status); /// Checks cudnnStatus_t converts to cutlas status and returns if Status::kSuccess o.w. 
throws exception Status checkCudnnErr(cudnnStatus_t cudnn_status); /// Maps a CUTLASS conv mode to a cuDNN conv mode enumeration bool get_cudnn_conv_mode(cudnnConvolutionMode_t &cudnn_conv_mode, conv::Mode conv_mode); /// Maps a CUTLASS layout type to a cuDNN data type enumeration bool get_cudnn_layout(cudnnTensorFormat_t &cudnn_layout, library::LayoutTypeID layout); /// Maps a CUTLASS numeric type to a cuDNN data type enumeration bool get_cudnn_datatype(cudnnDataType_t &cudnn_element_type, library::NumericTypeID element_type); /// Maps CUTLASS math OpcodeClassID and MathOperationID to cuDNN math_type bool get_cudnn_mathtype(cudnnMathType_t &cudnn_math_type, library::ConvDescription const &conv_desc); /// Returns a status if cudnn can satisfy a particular Conv2d description Status cudnn_satisfies(library::ConvDescription const &desc, library::Conv2dConfiguration const &configuration); /// Returns a status if cudnn can satisfy a particular Conv3d description Status cudnn_satisfies(library::ConvDescription const &desc, library::Conv3dConfiguration const &configuration); /// Cudnn compute type seems to be hardcoded to float (To handle a possible cudnn issue) float cast_cudnn_compute_type_to_float(library::NumericTypeID type, void const * src); /// This is a helper class to create cudnnHandle_t automatically on CudnnCreate object creation and /// to destroy cudnnHandle_t on CudnnCreate object destruction. /// Additionally, it provides implicit cast from CudnnCreate's object to cudnnHandle_t's object class CudnnCreate { private: cudnnHandle_t handle; cudnnStatus_t status; public: CudnnCreate() { status = cudnnCreate(&handle); } ~CudnnCreate() { cudnnDestroy(handle); } /// Implicit cast CudnnCreate object to cudnnHandle_t operator cudnnHandle_t() const { return handle; } /// returns cudnnStatus_t for handle creation cudnnStatus_t get_cudnn_create_status() { return status; } }; namespace detail { /// Dispatcher to cudnn convolution operators struct cudnnConvDispatcher { // // Data members // //library::Conv2dConfiguration configuration; library::ConvArguments arguments; library::ConvKind conv_kind; // cudnn-specific data structures to fill cudnn API call arguments // cudnn activation, filter, and output descriptors cudnnTensorDescriptor_t activation_desc; cudnnFilterDescriptor_t filter_desc; cudnnTensorDescriptor_t output_desc; cudnnConvolutionDescriptor_t conv_desc; // cudnn datatypes cudnnDataType_t data_type_activation; cudnnDataType_t data_type_filter; cudnnDataType_t data_type_output; // cudnn layouts cudnnTensorFormat_t layout_activation; cudnnTensorFormat_t layout_filter; cudnnTensorFormat_t layout_output; // cudnn convolution mode cudnnConvolutionMode_t conv_mode; // cudnn math type (tensorop, tensorop with conversion, simt) cudnnMathType_t math_type; // cudnn compute data type cudnnDataType_t compute_type; // cudnn compute type seems to be hardcoded to float (to handle a possible a cudnn issue) float alpha; float beta; // cudnn workspace size_t workspace_size_in_bytes = 0; cutlass::device_memory::allocation<char> workspace; // select cudnn's implicit gemm precomputed algorithm with tensor operations static cudnnConvolutionFwdAlgo_t const fprop_algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM; static cudnnConvolutionBwdDataAlgo_t const dgrad_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; static cudnnConvolutionBwdFilterAlgo_t const wgrad_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; Status status; // // Methods // // TODO: unify ctor cudnnConvDispatcher for conv2d and conv3d by unifying 
Conv2dConfiguration // ctor for conv2d cudnnConvDispatcher( library::ConvDescription const &op_desc, library::Conv2dConfiguration configuration, library::ConvArguments arguments_, cudnnHandle_t handle ): //configuration(configuration_), arguments(arguments_), conv_kind(op_desc.conv_kind), status(Status::kSuccess) { bool good = true; // Get cudnn datatype, layout, and convolution mode from library::ConvDescription good = (good && get_cudnn_datatype(data_type_activation, op_desc.A.element)); good = (good && get_cudnn_datatype(data_type_filter, op_desc.B.element)); good = (good && get_cudnn_datatype(data_type_output, op_desc.C.element)); good = (good && get_cudnn_layout(layout_activation, op_desc.A.layout)); good = (good && get_cudnn_layout(layout_filter, op_desc.B.layout)); good = (good && get_cudnn_layout(layout_output, op_desc.C.layout)); good = (good && get_cudnn_conv_mode(conv_mode, configuration.problem_size.mode)); // Get cudnn mathtype (cudnnMathType_t) good = (good && get_cudnn_mathtype(math_type, op_desc)); good = (good && get_cudnn_datatype( compute_type, op_desc.tile_description.math_instruction.element_accumulator)); // Check cutlass Conv2d description has equivalent operator in cudnn if (!good) { status = Status::kErrorNotSupported; return; } // cudnn compute type seems to be hardcoded to float (to handle a possible a cudnn issue) alpha = cast_cudnn_compute_type_to_float(op_desc.element_epilogue, arguments.alpha); beta = cast_cudnn_compute_type_to_float(op_desc.element_epilogue, arguments.beta); // Create convolution descriptor object status = get_cutlass_status(cudnnCreateConvolutionDescriptor(&conv_desc)); // Configure convolution operator std::vector<int> padding {configuration.problem_size.pad_h, configuration.problem_size.pad_w}; std::vector<int> stride {configuration.problem_size.stride_h, configuration.problem_size.stride_w}; std::vector<int> dilation {configuration.problem_size.dilation_h, configuration.problem_size.dilation_w}; status = get_cutlass_status( cudnnSetConvolutionNdDescriptor( conv_desc, op_desc.conv_dim, padding.data(), stride.data(), dilation.data(), conv_mode, compute_type )); // Set groups status = get_cutlass_status(cudnnSetConvolutionGroupCount(conv_desc, configuration.problem_size.groups)); // Create activation, filter, and output descriptor objects status = get_cutlass_status(cudnnCreateTensorDescriptor(&activation_desc)); status = get_cutlass_status(cudnnCreateFilterDescriptor(&filter_desc)); status = get_cutlass_status(cudnnCreateTensorDescriptor(&output_desc)); // Set activation, filter, and output descriptor status = get_cutlass_status( cudnnSetTensor4dDescriptor( activation_desc, layout_activation, data_type_activation, configuration.problem_size.N, configuration.problem_size.C, configuration.problem_size.H, configuration.problem_size.W )); status = get_cutlass_status( cudnnSetFilter4dDescriptor( filter_desc, data_type_filter, layout_filter, configuration.problem_size.K, configuration.problem_size.C / configuration.problem_size.groups, configuration.problem_size.R, configuration.problem_size.S )); status = get_cutlass_status( cudnnSetTensor4dDescriptor( output_desc, layout_output, data_type_output, configuration.problem_size.N, configuration.problem_size.K, configuration.problem_size.P, configuration.problem_size.Q )); // Set math instruction to tensor op status = get_cutlass_status( cudnnSetConvolutionMathType(conv_desc, math_type)); // Initialize workspace switch (conv_kind) { case library::ConvKind::kFprop: status = get_cutlass_status( 
cudnnGetConvolutionForwardWorkspaceSize( handle, activation_desc, filter_desc, conv_desc, output_desc, fprop_algo, &workspace_size_in_bytes )); break; case library::ConvKind::kDgrad: status = get_cutlass_status( cudnnGetConvolutionBackwardDataWorkspaceSize( handle, filter_desc, output_desc, conv_desc, activation_desc, dgrad_algo, &workspace_size_in_bytes )); break; case library::ConvKind::kWgrad: status = get_cutlass_status( cudnnGetConvolutionBackwardFilterWorkspaceSize( handle, activation_desc, output_desc, conv_desc, filter_desc, wgrad_algo, &workspace_size_in_bytes )); break; } workspace = cutlass::device_memory::allocation<char>(workspace_size_in_bytes); } // ctor for conv3d cudnnConvDispatcher( library::ConvDescription const &op_desc, library::Conv3dConfiguration configuration, library::ConvArguments arguments_, cudnnHandle_t handle ): //configuration(configuration_), arguments(arguments_), conv_kind(op_desc.conv_kind), status(Status::kSuccess) { bool good = true; // Get cudnn datatype, layout, and convolution mode from library::ConvDescription good = (good && get_cudnn_datatype(data_type_activation, op_desc.A.element)); good = (good && get_cudnn_datatype(data_type_filter, op_desc.B.element)); good = (good && get_cudnn_datatype(data_type_output, op_desc.C.element)); good = (good && get_cudnn_layout(layout_activation, op_desc.A.layout)); good = (good && get_cudnn_layout(layout_filter, op_desc.B.layout)); good = (good && get_cudnn_layout(layout_output, op_desc.C.layout)); good = (good && get_cudnn_conv_mode(conv_mode, configuration.problem_size.mode)); // cudnn compute type seems to be hardcoded to float (to handle a possible a cudnn issue) alpha = cast_cudnn_compute_type_to_float(op_desc.element_epilogue, arguments.alpha); beta = cast_cudnn_compute_type_to_float(op_desc.element_epilogue, arguments.beta); good = (good && get_cudnn_datatype( compute_type, op_desc.tile_description.math_instruction.element_accumulator)); // Check cutlass Conv2d description has equivalent operator in cudnn if (!good) { status = Status::kErrorNotSupported; } // Create convolution descriptor object status = get_cutlass_status(cudnnCreateConvolutionDescriptor(&conv_desc)); // Configure convolution operator std::vector<int> padding {configuration.problem_size.pad_d, configuration.problem_size.pad_h, configuration.problem_size.pad_w}; std::vector<int> stride {configuration.problem_size.stride_d, configuration.problem_size.stride_h, configuration.problem_size.stride_w}; std::vector<int> dilation {configuration.problem_size.dilation_d, configuration.problem_size.dilation_h, configuration.problem_size.dilation_w}; status = get_cutlass_status( cudnnSetConvolutionNdDescriptor( conv_desc, op_desc.conv_dim, padding.data(), stride.data(), dilation.data(), conv_mode, compute_type )); // Set groups status = get_cutlass_status(cudnnSetConvolutionGroupCount(conv_desc, configuration.problem_size.groups)); // Create activation, filter, and output descriptor objects status = get_cutlass_status(cudnnCreateTensorDescriptor(&activation_desc)); status = get_cutlass_status(cudnnCreateFilterDescriptor(&filter_desc)); status = get_cutlass_status(cudnnCreateTensorDescriptor(&output_desc)); // Set activation descriptor std::vector<int> activation_extent { configuration.problem_size.N, configuration.problem_size.C, configuration.problem_size.D, configuration.problem_size.H, configuration.problem_size.W }; std::vector<int> activation_stride { configuration.layout_activations.stride()[3], 1, configuration.layout_activations.stride()[2], 
configuration.layout_activations.stride()[1], configuration.layout_activations.stride()[0] }; status = get_cutlass_status( cudnnSetTensorNdDescriptor( activation_desc, data_type_activation, op_desc.conv_dim + 2, activation_extent.data(), activation_stride.data() )); // Set filter descriptor std::vector<int> filter_extent { configuration.problem_size.K, configuration.problem_size.C, configuration.problem_size.T, configuration.problem_size.R, configuration.problem_size.S }; std::vector<int> filter_stride { configuration.layout_filters.stride()[3], 1, configuration.layout_filters.stride()[2], configuration.layout_filters.stride()[1], configuration.layout_filters.stride()[0] }; status = get_cutlass_status( cudnnSetFilterNdDescriptor( filter_desc, data_type_filter, layout_filter, op_desc.conv_dim + 2, filter_extent.data() )); // Set output descriptor std::vector<int> output_extent { configuration.problem_size.N, configuration.problem_size.K, configuration.problem_size.Z, configuration.problem_size.P, configuration.problem_size.Q }; std::vector<int> output_stride { configuration.layout_output.stride()[3], 1, configuration.layout_output.stride()[2], configuration.layout_output.stride()[1], configuration.layout_output.stride()[0] }; status = get_cutlass_status( cudnnSetTensorNdDescriptor( output_desc, data_type_output, op_desc.conv_dim + 2, output_extent.data(), output_stride.data() )); // Set math instruction to tensor op status = get_cutlass_status( cudnnSetConvolutionMathType(conv_desc, math_type)); // Initialize workspace switch (conv_kind) { case library::ConvKind::kFprop: status = get_cutlass_status( cudnnGetConvolutionForwardWorkspaceSize( handle, activation_desc, filter_desc, conv_desc, output_desc, fprop_algo, &workspace_size_in_bytes )); break; case library::ConvKind::kDgrad: status = get_cutlass_status( cudnnGetConvolutionBackwardDataWorkspaceSize( handle, filter_desc, output_desc, conv_desc, activation_desc, dgrad_algo, &workspace_size_in_bytes )); break; case library::ConvKind::kWgrad: status = get_cutlass_status( cudnnGetConvolutionBackwardFilterWorkspaceSize( handle, activation_desc, output_desc, conv_desc, filter_desc, wgrad_algo, &workspace_size_in_bytes )); break; } workspace = cutlass::device_memory::allocation<char>(workspace_size_in_bytes); } /// Executes Conv2d operator from cudnn library cudnnStatus_t operator()(cudnnHandle_t handle) { switch (conv_kind) { case library::ConvKind::kFprop: return cudnnConvolutionForward( handle, &alpha, activation_desc, activation(), filter_desc, filter(), conv_desc, fprop_algo, workspace.get(), workspace_size_in_bytes, &beta, output_desc, arguments.D ); case library::ConvKind::kDgrad: return cudnnConvolutionBackwardData( handle, &alpha, filter_desc, filter(), output_desc, output(), conv_desc, dgrad_algo, workspace.get(), workspace_size_in_bytes, &beta, activation_desc, arguments.D ); case library::ConvKind::kWgrad: return cudnnConvolutionBackwardFilter( handle, &alpha, activation_desc, activation(), output_desc, output(), conv_desc, wgrad_algo, workspace.get(), workspace_size_in_bytes, &beta, filter_desc, arguments.D ); default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); } } // Returns Activation Tensor void const * activation() const { switch(conv_kind) { case library::ConvKind::kFprop : return arguments.A; case library::ConvKind::kDgrad : return arguments.C; case library::ConvKind::kWgrad : return arguments.B; default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); } } // Returns 
Filter Tensor void const *filter() const { switch(conv_kind) { case library::ConvKind::kFprop : return arguments.B; case library::ConvKind::kDgrad : return arguments.B; case library::ConvKind::kWgrad : return arguments.C; default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); } } // Returns Output Tensor void const *output() const { switch(conv_kind) { case library::ConvKind::kFprop : return arguments.C; case library::ConvKind::kDgrad : return arguments.A; case library::ConvKind::kWgrad : return arguments.A; default : throw std::runtime_error("Invalid Conv Operator (fprop, dgrad, wgrad)"); } } }; } // namespace detail ///////////////////////////////////////////////////////////////////////////////////////////////// #endif //#if CUTLASS_ENABLE_CUDNN } // namespace profiler } // namespace cutlass
// File: cutlass/tools/profiler/include/cutlass/profiler/cudnn_helpers.h
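// Sketch of how the pieces in cudnn_helpers.h fit together for a cuDNN
// reference check: cudnn_satisfies() filters unsupported configurations,
// CudnnCreate owns the cudnnHandle_t, and detail::cudnnConvDispatcher builds
// the descriptors and launches the matching fprop/dgrad/wgrad call. The
// 'desc', 'config', and 'args' objects are assumed to come from the profiler's
// operation under test, with CUTLASS_ENABLE_CUDNN defined.

#include "cutlass/profiler/cudnn_helpers.h"

cutlass::Status verify_conv2d_with_cudnn(
  cutlass::library::ConvDescription const &desc,
  cutlass::library::Conv2dConfiguration const &config,
  cutlass::library::ConvArguments const &args) {

  using namespace cutlass::profiler;

  // Reject problems cuDNN cannot express before building any descriptors.
  cutlass::Status status = cudnn_satisfies(desc, config);
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  CudnnCreate handle;   // cudnnCreate() now, cudnnDestroy() on scope exit
  status = get_cutlass_status(handle.get_cudnn_create_status());
  if (status != cutlass::Status::kSuccess) {
    return status;
  }

  // Builds tensor/filter/convolution descriptors and sizes the workspace.
  detail::cudnnConvDispatcher dispatcher(desc, config, args, handle);
  if (dispatcher.status != cutlass::Status::kSuccess) {
    return dispatcher.status;
  }

  // Dispatches to cudnnConvolutionForward / BackwardData / BackwardFilter.
  return get_cutlass_status(dispatcher(handle));
}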
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Execution environment */ #include <iostream> #include <stdexcept> #include <iomanip> #include <algorithm> #include <cstring> #include "cutlass/library/util.h" #include "cutlass/library/util.h" #include "cutlass/profiler/performance_report.h" #include "cutlass/profiler/debug.h" namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// #if defined(__unix__) #define SHELL_COLOR_BRIGHT() "\033[1;37m" #define SHELL_COLOR_GREEN() "\033[1;32m" #define SHELL_COLOR_RED() "\033[1;31m" #define SHELL_COLOR_END() "\033[0m" #else #define SHELL_COLOR_BRIGHT() "" #define SHELL_COLOR_GREEN() "" #define SHELL_COLOR_RED() "" #define SHELL_COLOR_END() "" #endif ///////////////////////////////////////////////////////////////////////////////////////////////// PerformanceReport::PerformanceReport( Options const &options, std::vector<std::string> const &argument_names, library::OperationKind const &op_kind ): options_(options), argument_names_(argument_names), problem_index_(0), good_(true), op_kind_(op_kind) { // Strip '.csv' if present std::string base_path = options_.report.output_path; base_path = base_path.substr(0, base_path.rfind(".csv")); op_file_name_ = base_path + "." + to_string(op_kind_) + ".csv"; base_path = options_.report.junit_output_path; base_path = base_path.substr(0, base_path.rfind(".xml")); base_path = base_path.substr(0, base_path.rfind(".junit")); op_junit_file_name_ = base_path + "." 
+ to_string(op_kind_) + ".junit.xml"; // // Open output file for operation of PerformanceReport::op_kind // if (!options_.report.output_path.empty()) { bool print_header = true; if (options_.report.append) { std::ifstream test_output_file(op_file_name_); if (test_output_file.is_open()) { print_header = false; test_output_file.close(); } output_file_.open(op_file_name_, std::ios::app); } else { output_file_.open(op_file_name_); } if (!output_file_.good()) { std::cerr << "Could not open output file at path '" << options_.report.output_path << "'" << std::endl; good_ = false; } if (print_header) { print_csv_header_(output_file_) << std::endl; } } if (!options_.report.junit_output_path.empty()) { junit_output_file_.open(op_junit_file_name_); if (!junit_output_file_.good()) { std::cerr << "Could not open junit output file at path '" << options_.report.junit_output_path << "'" << std::endl; good_ = false; } print_junit_header_(junit_output_file_); } } void PerformanceReport::next_problem() { ++problem_index_; } void PerformanceReport::append_result(PerformanceResult result) { result.problem_index = problem_index_; if (options_.report.verbose) { std::cout << "\n"; print_result_pretty_(std::cout, result) << std::flush; } if (junit_output_file_.is_open()) { print_junit_result_(junit_output_file_, result); } if (output_file_.is_open()) { print_result_csv_(output_file_, result) << std::endl; } else { concatenated_results_.push_back(result); } } void PerformanceReport::sort_results(PerformanceResultVector &results) { struct FlopsPerByteCompare { bool operator()(const PerformanceResult &a, const PerformanceResult &b) { double a_flops_per_byte = double(a.flops) / double(a.bytes); double b_flops_per_byte = double(b.flops) / double(b.bytes); return (a_flops_per_byte < b_flops_per_byte); } }; std::stable_sort(results.begin(), results.end(), FlopsPerByteCompare()); } void PerformanceReport::append_results(PerformanceResultVector const &results) { if (options_.report.verbose) { std::cout << "\n\n"; } // For each result for (auto const & result : results) { append_result(result); } } PerformanceReport::~PerformanceReport() { // // Output results to stdout if they were not written to a file already. 
// if (options_.report.verbose && !concatenated_results_.empty()) { if (options_.report.sort_results) { sort_results(concatenated_results_); } std::cout << "\n\n"; std::cout << "=============================\n\n"; std::cout << "CSV Results:\n\n"; print_csv_header_(std::cout) << std::endl; for (auto const &result : concatenated_results_) { print_result_csv_(std::cout, result) << "\n"; } } else if (output_file_.is_open() && options_.report.verbose) { std::cout << "\nWrote results to '" << op_file_name_ << "'" << std::endl; } if (output_file_.is_open()) { output_file_.close(); } if (junit_output_file_.is_open()) { print_junit_footer_(junit_output_file_); junit_output_file_.close(); std::cout << "\nWrote jUnit results to '" << op_junit_file_name_ << "'" << std::endl; } } static const char *disposition_status_color(Disposition disposition) { switch (disposition) { case Disposition::kPassed: return SHELL_COLOR_GREEN(); case Disposition::kIncorrect: return SHELL_COLOR_RED(); case Disposition::kFailed: return SHELL_COLOR_RED(); default: break; } return SHELL_COLOR_END(); } /// Prints the result in human readable form std::ostream & PerformanceReport::print_result_pretty_( std::ostream &out, PerformanceResult const &result, bool use_shell_coloring) { out << "=============================\n" << " Problem ID: " << result.problem_index << "\n"; if (!options_.report.pivot_tags.empty()) { out << " Tags: "; int column_idx = 0; for (auto const & tag : options_.report.pivot_tags) { out << (column_idx++ ? "," : "") << tag.first << ":" << tag.second; } out << "\n"; } std::string shell_color_bright = use_shell_coloring ? SHELL_COLOR_BRIGHT() : ""; std::string shell_color_end = use_shell_coloring ? SHELL_COLOR_END() : ""; auto _disposition_status_color = [&](Disposition d) -> const char * { return use_shell_coloring ? disposition_status_color(d) : ""; }; out << "\n" << " Provider: " << shell_color_bright << library::to_string(result.provider, true) << shell_color_end << "\n" << " OperationKind: " << shell_color_bright << library::to_string(result.op_kind) << shell_color_end << "\n" << " Operation: " << result.operation_name << "\n\n" << " Status: " << shell_color_bright << library::to_string(result.status, true) << shell_color_end << "\n" << " Verification: " << shell_color_bright << (options_.verification.enabled ? 
"ON":"OFF") << shell_color_end << "\n" << " Disposition: " << _disposition_status_color(result.disposition) << to_string(result.disposition, true) << shell_color_end << "\n\n"; // Display individual verification results for each verification-provider if (options_.verification.enabled) { static int const indent_spaces = 16; for(auto & m : result.verification_map) { out << std::right << std::setw(indent_spaces) << library::to_string(m.first, true) << ": " << to_string(m.second, true) << "\n"; } } out << "\n Arguments:"; int column_idx = 0; for (auto const &arg : result.arguments) { if (!arg.second.empty()) { out << " --" << arg.first << "=" << arg.second; column_idx += int(4 + arg.first.size() + arg.second.size()); if (column_idx > 98) { out << " \\\n "; column_idx = 0; } } } out << "\n\n"; out << " Bytes: " << result.bytes << " bytes\n" << " FLOPs: " << result.flops << " flops\n" << " FLOPs/Byte: " << (result.flops / result.bytes) << "\n\n"; if (result.good()) { out << " Runtime: " << result.runtime << " ms\n" << " Memory: " << result.gbytes_per_sec() << " GiB/s\n" << "\n Math: " << result.gflops_per_sec() << " GFLOP/s\n"; } return out; } /// Prints the CSV header std::ostream & PerformanceReport::print_csv_header_( std::ostream &out) { int column_idx = 0; // Pivot tags for (auto const & tag : options_.report.pivot_tags) { out << (column_idx++ ? "," : "") << tag.first; } out << (column_idx ? "," : "") << "Problem,Provider" << ",OperationKind,Operation,Disposition,Status"; for (auto const &arg_name : argument_names_) { out << "," << arg_name; } out << ",Bytes" << ",Flops" << ",Flops/Byte" << ",Runtime" << ",GB/s" << ",GFLOPs" ; return out; } /// Print the result in CSV output std::ostream & PerformanceReport::print_result_csv_( std::ostream &out, PerformanceResult const &result) { int column_idx = 0; // Pivot tags for (auto const & tag : options_.report.pivot_tags) { out << (column_idx++ ? "," : "") << tag.second; } out << (column_idx ? 
"," : "") << result.problem_index << "," << to_string(result.provider, true) << "," << to_string(result.op_kind) << "," << result.operation_name << "," << to_string(result.disposition) << "," << library::to_string(result.status); for (auto const & arg : result.arguments) { out << "," << arg.second; } out << "," << result.bytes << "," << result.flops << "," << result.flops / result.bytes << "," << result.runtime; if (result.good()) { out << "," << result.gbytes_per_sec() << "," << result.gflops_per_sec() ; } else { out << std::string(2 , ',' ); } return out; } std::ostream & PerformanceReport::print_junit_header_(std::ostream &out) { out << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" << std::endl; out << "<testsuite name=\"cutlass_profiler\">" << std::endl; return out; } namespace { std::string escape_xml_special_chars(const std::string& src) { std::stringstream dst; for (char ch : src) { switch (ch) { case '&': dst << "&amp;"; break; case '\'': dst << "&apos;"; break; case '"': dst << "&quot;"; break; case '<': dst << "&lt;"; break; case '>': dst << "&gt;"; break; default: dst << ch; break; } } return dst.str(); } template<typename T> std::ostream & print_junit_result_property_(std::ostream & os, const std::string & name, const T & property) { return os << " <property name=\"" << name << "\" value=\"" << property << "\" />" << std::endl; } } std::ostream & PerformanceReport::print_junit_result_(std::ostream &out, PerformanceResult const &result) { out << " " << "<testcase name=\""; std::string delim = ""; // Pivot tags for (auto const & tag : options_.report.pivot_tags) { out << delim << tag.second; delim = "_"; } out << delim << to_string(result.op_kind); delim = "_"; out << delim << result.operation_name; for (auto const & arg : result.arguments) { out << delim << arg.second; } out << "\" "; bool skipped = false, failed = false, error = false; switch (result.disposition) { case Disposition::kNotRun: case Disposition::kNotSupported: skipped = true; break; case Disposition::kPassed: case Disposition::kNotVerified: break; case Disposition::kFailed: case Disposition::kIncorrect: failed = true; break; case Disposition::kInvalidProblem: case Disposition::kInvalid: error = true; break; }; if (skipped) { out << "status=\"notrun\""; } else { out << "status=\"run\""; } out << ">" << std::endl; if (failed) { out << " <failure message=\"" << to_string(result.disposition) << "\" />" << std::endl; } if (error) { out << " <error message=\"" << to_string(result.disposition) << "\" />" << std::endl; } out << " <system-out><![CDATA[" << std::endl; std::stringstream ss; print_result_pretty_(ss, result, false); out << escape_xml_special_chars(ss.str()) << std::endl; out << " ]]></system-out>" << std::endl; out << " </testcase>" << std::endl; return out; } std::ostream & PerformanceReport::print_junit_footer_(std::ostream &out) { out << "</testsuite>" << std::endl; return out; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass
// File: cutlass/tools/profiler/src/performance_report.cpp
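// Sketch of the intended call sequence for PerformanceReport above. The
// Options object, the argument-name list, and the PerformanceResult values are
// assumed to be produced by the surrounding profiler code; the column names
// used here ("m", "n", "k") are purely illustrative.

#include <string>
#include <vector>
#include "cutlass/profiler/performance_report.h"

void emit_gemm_report(
  cutlass::profiler::Options const &options,
  std::vector<cutlass::profiler::PerformanceResult> const &results) {

  std::vector<std::string> argument_names = {"m", "n", "k"};   // hypothetical column set

  cutlass::profiler::PerformanceReport report(
    options, argument_names, cutlass::library::OperationKind::kGemm);

  for (auto const &result : results) {
    report.append_result(result);   // streams to CSV/JUnit, or buffers it
    report.next_problem();          // advances the problem index
  }

  // The destructor flushes any buffered results (sorted by FLOPs/byte if
  // requested) and closes the CSV and JUnit files.
}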
/****************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ #pragma once /** * \file * \brief C++ interface to CUDA device memory management functions. 
*/ #include <memory> #include "cutlass/platform/platform.h" #include "cutlass/numeric_types.h" #include "exceptions.h" namespace cutlass { namespace device_memory { /****************************************************************************** * Allocation lifetime ******************************************************************************/ /// Allocate a buffer of \p count elements of type \p T on the current CUDA device template <typename T> T* allocate(size_t count = 1) { T* ptr = 0; size_t bytes = 0; bytes = count * sizeof(T); cudaError_t cuda_error = cudaMalloc((void**)&ptr, bytes); if (cuda_error != cudaSuccess) { throw cuda_exception("Failed to allocate memory", cuda_error); } return ptr; } /// Free the buffer pointed to by \p ptr template <typename T> void free(T* ptr) { if (ptr) { cudaError_t cuda_error = (cudaFree(ptr)); if (cuda_error != cudaSuccess) { throw cuda_exception("Failed to free device memory", cuda_error); } } } /****************************************************************************** * Data movement ******************************************************************************/ template <typename T> void copy(T* dst, T const* src, size_t count, cudaMemcpyKind kind) { size_t bytes = count * sizeof_bits<T>::value / 8; if (bytes == 0 && count > 0) bytes = 1; cudaError_t cuda_error = (cudaMemcpy(dst, src, bytes, kind)); if (cuda_error != cudaSuccess) { throw cuda_exception("cudaMemcpy() failed", cuda_error); } } template <typename T> void copy_to_device(T* dst, T const* src, size_t count = 1) { copy(dst, src, count, cudaMemcpyHostToDevice); } template <typename T> void copy_to_host(T* dst, T const* src, size_t count = 1) { copy(dst, src, count, cudaMemcpyDeviceToHost); } template <typename T> void copy_device_to_device(T* dst, T const* src, size_t count = 1) { copy(dst, src, count, cudaMemcpyDeviceToDevice); } template <typename T> void copy_host_to_host(T* dst, T const* src, size_t count = 1) { copy(dst, src, count, cudaMemcpyHostToHost); } /// Copies elements from device memory to host-side range template <typename OutputIterator, typename T> void insert_to_host(OutputIterator begin, OutputIterator end, T const* device_begin) { size_t elements = end - begin; copy_to_host(&*begin, device_begin, elements); } /// Copies elements to device memory from host-side range template <typename T, typename InputIterator> void insert_to_device(T* device_begin, InputIterator begin, InputIterator end) { size_t elements = end - begin; copy_to_device(device_begin, &*begin, elements); } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace device_memory ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> class DeviceAllocation { public: /// Delete functor for CUDA device memory struct deleter { void operator()(T* ptr) { cudaError_t cuda_error = (cudaFree(ptr)); if (cuda_error != cudaSuccess) { // noexcept // throw cuda_exception("cudaFree() failed", cuda_error); return; } } }; public: // // Data members // /// Number of elements of T allocated on the current CUDA device size_t capacity; /// Smart pointer platform::unique_ptr<T, deleter> smart_ptr; public: // // Static methods // /// Static member to compute the number of bytes needed for a given number of elements static size_t bytes(size_t elements) { if (sizeof_bits<T>::value < 8) { size_t const kElementsPerByte = 8 / sizeof_bits<T>::value; return elements / kElementsPerByte; } else { size_t const 
kBytesPerElement = sizeof_bits<T>::value / 8; return elements * kBytesPerElement; } } public: // // Methods // /// Constructor: allocates no memory DeviceAllocation() : capacity(0) {} /// Constructor: allocates \p capacity elements on the current CUDA device DeviceAllocation(size_t _capacity) : smart_ptr(device_memory::allocate<T>(_capacity)), capacity(_capacity) {} /// Constructor: allocates \p capacity elements on the current CUDA device taking ownership of the allocation DeviceAllocation(T *ptr, size_t _capacity) : smart_ptr(ptr), capacity(_capacity) {} /// Copy constructor DeviceAllocation(DeviceAllocation const &p): smart_ptr(device_memory::allocate<T>(p.capacity)), capacity(p.capacity) { device_memory::copy_device_to_device(smart_ptr.get(), p.get(), capacity); } /// Move constructor DeviceAllocation(DeviceAllocation &&p): capacity(0) { std::swap(smart_ptr, p.smart_ptr); std::swap(capacity, p.capacity); } /// Destructor ~DeviceAllocation() { reset(); } /// Returns a pointer to the managed object T* get() const { return smart_ptr.get(); } /// Releases the ownership of the managed object (without deleting) and resets capacity to zero T* release() { capacity = 0; return smart_ptr.release(); } /// Deletes the managed object and resets capacity to zero void reset() { capacity = 0; smart_ptr.reset(); } /// Deletes managed object, if owned, and allocates a new object void reset(size_t _capacity) { reset(device_memory::allocate<T>(_capacity), _capacity); } /// Deletes managed object, if owned, and replaces its reference with a given pointer and capacity void reset(T* _ptr, size_t _capacity) { smart_ptr.reset(_ptr); capacity = _capacity; } /// Allocates a new buffer and copies the old buffer into it. The old buffer is then released. void reallocate(size_t new_capacity) { platform::unique_ptr<T, deleter> new_allocation(device_memory::allocate<T>(new_capacity)); device_memory::copy_device_to_device( new_allocation.get(), smart_ptr.get(), std::min(new_capacity, capacity)); std::swap(smart_ptr, new_allocation); std::swap(new_capacity, capacity); } /// Returns the number of elements size_t size() const { return capacity; } /// Returns the number of bytes needed to store the allocation size_t bytes() const { return bytes(capacity); } /// Returns a pointer to the object owned by *this T* operator->() const { return smart_ptr.get(); } /// Returns the deleter object which would be used for destruction of the managed object. deleter& get_deleter() { return smart_ptr.get_deleter(); } /// Returns the deleter object which would be used for destruction of the managed object (const) const deleter& get_deleter() const { return smart_ptr.get_deleter(); } /// Copies a device-side memory allocation DeviceAllocation & operator=(DeviceAllocation const &p) { if (capacity != p.capacity) { smart_ptr.reset(device_memory::allocate<T>(p.capacity)); capacity = p.capacity; } device_memory::copy_device_to_device(smart_ptr.get(), p.get(), capacity); return *this; } /// Move assignment DeviceAllocation & operator=(DeviceAllocation && p) { std::swap(smart_ptr, p.smart_ptr); std::swap(capacity, p.capacity); return *this; } /// Copies the entire allocation from another location in device memory. 
void copy_from_device(T const *ptr) const { copy_from_device(ptr, capacity); } /// Copies a given number of elements from device memory void copy_from_device(T const *ptr, size_t elements) const { device_memory::copy_device_to_device(get(), ptr, elements); } void copy_to_device(T *ptr) const { copy_to_device(ptr, capacity); } void copy_to_device(T *ptr, size_t elements) const { device_memory::copy_device_to_device(ptr, get(), elements); } void copy_from_host(T const *ptr) const { copy_from_host(ptr, capacity); } void copy_from_host(T const *ptr, size_t elements) const { device_memory::copy_to_device(get(), ptr, elements); } void copy_to_host(T *ptr) const { copy_to_host(ptr, capacity); } void copy_to_host(T *ptr, size_t elements) const { device_memory::copy_to_host(ptr, get(), elements); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// namespace device_memory { /// Device allocation abstraction that tracks size and capacity template <typename T> using allocation = cutlass::DeviceAllocation<T>; } // namespace device_memory ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/tools/util/include/cutlass/util/device_memory.h/0
{ "file_path": "cutlass/tools/util/include/cutlass/util/device_memory.h", "repo_id": "cutlass", "token_count": 3264 }
61
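The device_memory.h utilities above are usually exercised through the RAII DeviceAllocation wrapper rather than through raw allocate()/free() calls. The following host-side sketch is an illustration added here, not part of the row above; the element count and contents are arbitrary, and it uses only members defined in that header, with failures surfacing as cutlass::cuda_exception.

#include <iostream>
#include <vector>

#include "cutlass/util/device_memory.h"

int main() {
  try {
    int const kCount = 1024;

    std::vector<float> host_src(kCount, 1.0f);
    std::vector<float> host_dst(kCount, 0.0f);

    // RAII-managed device buffer; cudaFree() runs when `buffer` leaves scope.
    cutlass::DeviceAllocation<float> buffer(kCount);

    // Host -> device, then device -> host, using the whole-capacity overloads.
    buffer.copy_from_host(host_src.data());
    buffer.copy_to_host(host_dst.data());

    std::cout << "Round-trip element[0] = " << host_dst[0]
              << ", elements = " << buffer.size() << std::endl;
  }
  catch (cutlass::cuda_exception const &e) {
    // allocate() and the copy helpers throw cuda_exception on CUDA runtime failures.
    std::cerr << "CUTLASS device_memory call failed: " << e.what() << std::endl;
    return -1;
  }
  return 0;
}

Because the deleter swallows cudaFree() errors instead of throwing, destruction stays safe during stack unwinding; explicit reset() can be used where a deterministic release point is needed.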
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Utilities for packing constructing canonical CuTe stride types for 3.x mainloop params. */ #pragma once #include "cute/layout.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { ///////////////////////////////////////////////////////////////////////////////////////////////// // Strides without batch mode template <class IntT> CUTLASS_HOST_DEVICE cute::Stride<IntT, cute::Int<1>> make_cute_packed_stride(cute::Stride<IntT, cute::Int<1>> s, cute::Shape<int,int,int> shape_MKL) { static_assert(std::is_integral_v<IntT>, "Stride must have an integral type so it can be set dynamically. Static strides not supported."); auto s_copy = s; cute::get<0>(s_copy) = static_cast<IntT>(cute::get<1>(shape_MKL)); return s_copy; } template <class IntT> CUTLASS_HOST_DEVICE cute::Stride<cute::Int<1>, IntT> make_cute_packed_stride(cute::Stride<cute::Int<1>, IntT> s, cute::Shape<int,int,int> shape_MKL) { static_assert(std::is_integral_v<IntT>, "Stride must have an integral type so it can be set dynamically. Static strides not supported."); auto s_copy = s; cute::get<1>(s_copy) = static_cast<IntT>(cute::get<0>(shape_MKL)); return s_copy; } ///////////////////////////////////////////////////////////////////////////////////////////////// // Strides with batch mode template <class IntT> CUTLASS_HOST_DEVICE cute::Stride<IntT, cute::Int<1>, int64_t> make_cute_packed_stride(cute::Stride<IntT, cute::Int<1>, int64_t> s, cute::Shape<int,int,int> shape_MKL) { static_assert(std::is_integral_v<IntT>, "Stride must have an integral type so it can be set dynamically. 
Static strides not supported."); auto s_copy = s; cute::get<0>(s_copy) = static_cast<IntT>(cute::get<1>(shape_MKL)); int batch_count = cute::get<2>(shape_MKL); if (batch_count > 1) { cute::get<2>(s_copy) = static_cast<IntT>(cute::get<0>(shape_MKL) * cute::get<1>(shape_MKL)); } else { cute::get<2>(s_copy) = static_cast<IntT>(0); } return s_copy; } template <class IntT> CUTLASS_HOST_DEVICE cute::Stride<cute::Int<1>, IntT, int64_t> make_cute_packed_stride(cute::Stride<cute::Int<1>, IntT, int64_t> s, cute::Shape<int,int,int> shape_MKL) { static_assert(std::is_integral_v<IntT>, "Stride must have an integral type so it can be set dynamically. Static strides not supported."); auto s_copy = s; cute::get<1>(s_copy) = static_cast<IntT>(cute::get<0>(shape_MKL)); int batch_count = cute::get<2>(shape_MKL); if (batch_count > 1) { cute::get<2>(s_copy) = static_cast<IntT>(cute::get<0>(shape_MKL) * cute::get<1>(shape_MKL)); } else { cute::get<2>(s_copy) = static_cast<IntT>(0); } return s_copy; } ///////////////////////////////////////////////////////////////////////////////////////////////// // Strides with group mode template <class StrideIntT> CUTLASS_HOST_DEVICE cute::Stride<StrideIntT, cute::Int<1>, cute::Int<0>> make_cute_packed_stride(cute::Stride<StrideIntT, cute::Int<1>, cute::Int<0>> s, cute::Shape<int,int,int> shape_MKL) { static_assert(std::is_integral_v<StrideIntT>, "Stride must have an integral type so it can be set dynamically. Static strides not supported."); auto s_copy = s; cute::get<0>(s_copy) = static_cast<StrideIntT>(cute::get<1>(shape_MKL)); return s_copy; } template <class StrideIntT> CUTLASS_HOST_DEVICE cute::Stride<cute::Int<1>, StrideIntT, cute::Int<0>> make_cute_packed_stride(cute::Stride<cute::Int<1>, StrideIntT, cute::Int<0>> s, cute::Shape<int,int,int> shape_MKL) { static_assert(std::is_integral_v<StrideIntT>, "Stride must have an integral type so it can be set dynamically. Static strides not supported."); auto s_copy = s; cute::get<1>(s_copy) = static_cast<StrideIntT>(cute::get<0>(shape_MKL)); return s_copy; } ///////////////////////////////////////////////////////////////////////////////////////////////// // Strides for convolutions // Output cutlass::layout::TensorNDHWC -> rank-3 stride (InT,_1,_0) // Note: For fprop/dgrad kernel, strides are assumed to be layout right in NZPQK/NDHWC order // and therefore can be coalesced to just q/w. For wgrad kernel, strides are assumed to be layout // right in KTRSC order and can be coalesced to just k. // We enforce this condition here with asserts. template <class IntT, size_t RankT_> CUTLASS_HOST_DEVICE cute::Stride<IntT, cute::Int<1>, cute::Int<0>> make_cute_packed_stride( cute::Stride<IntT, cute::Int<1>, cute::Int<0>> s, cute::array<int32_t, RankT_> shape_output, cute::array<IntT, RankT_> stride_output, cutlass::conv::Operator conv_op) { static_assert(std::is_integral_v<IntT>, "Stride must have an integral type so it can be set dynamically. Static strides not supported."); static_assert(RankT_ >= 3u); constexpr static int RankT = static_cast<int>(RankT_); assert(stride_output[RankT-1] == 1); cute::for_each(cute::make_seq<RankT-2>{}, [&](auto i) { assert(stride_output[i] == shape_output[i+1] * stride_output[i+1]); }); auto s_copy = s; cute::get<0>(s_copy) = (conv_op == cutlass::conv::Operator::kWgrad) ? 
stride_output[0] : stride_output[RankT-2]; return s_copy; } // // Activation tensor ((w, h, d, n), _1) for fprop kernel // // Activation cutlass::layout::TensorNWC -> rank-2 stride ((W,N),_1) template <class IntT> CUTLASS_HOST_DEVICE cute::Stride<cute::Stride<IntT, IntT>, cute::Int<1>> make_cute_packed_stride( cute::Stride<cute::Stride<IntT, IntT>, cute::Int<1>> s, cute::array<IntT, 3> stride_nwc, conv::Operator ConvOp) { static_assert(std::is_integral_v<IntT>, "Stride must have an integral type so it can be set dynamically. Static strides not supported."); assert(stride_nwc[2] == 1); auto s_copy = s; cute::get<0,0>(s_copy) = stride_nwc[1]; cute::get<0,1>(s_copy) = stride_nwc[0]; return s_copy; } // Activation cutlass::layout::TensorNHWC -> rank-2 stride ((W,H,N),_1) template <class IntT> CUTLASS_HOST_DEVICE cute::Stride<cute::Stride<IntT, IntT, IntT>, cute::Int<1>> make_cute_packed_stride( cute::Stride<cute::Stride<IntT, IntT, IntT>, cute::Int<1>> s, cute::array<IntT, 4> stride_nhwc, conv::Operator ConvOp) { static_assert(std::is_integral_v<IntT>, "Stride must have an integral type so it can be set dynamically. Static strides not supported."); assert(stride_nhwc[3] == 1); auto s_copy = s; cute::for_each(cute::make_seq<3>{}, [&](auto i) { cute::get<0,i>(s_copy) = stride_nhwc[2-i]; }); return s_copy; } // Activation cutlass::layout::TensorNDHWC -> rank-2 stride ((W,H,D,N),_1) template <class IntT> CUTLASS_HOST_DEVICE cute::Stride<cute::Stride<IntT, IntT, IntT, IntT>, cute::Int<1>> make_cute_packed_stride( cute::Stride<cute::Stride<IntT, IntT, IntT, IntT>, cute::Int<1>> s, cute::array<IntT, 5> stride_ndhwc, conv::Operator ConvOp) { static_assert(std::is_integral_v<IntT>, "Stride must have an integral type so it can be set dynamically. Static strides not supported."); assert(stride_ndhwc[4] == 1); auto s_copy = s; cute::for_each(cute::make_seq<4>{}, [&](auto i) { cute::get<0,i>(s_copy) = stride_ndhwc[3-i]; }); return s_copy; } // // Filter tensor (k, (_1, s, r, t)) for fprop kernel // // Filter cutlass::layout::TensorNWC -> rank-2 stride (k, (_1, s)) template <class IntT> CUTLASS_HOST_DEVICE cute::Stride<IntT, cute::Stride<cute::Int<1>, IntT>> make_cute_packed_stride( cute::Stride<IntT, cute::Stride<cute::Int<1>, IntT>> s, cute::array<IntT, 3> stride_ksc, conv::Operator ConvOp) { static_assert(std::is_integral_v<IntT>, "Stride must have an integral type so it can be set dynamically. Static strides not supported."); assert(stride_ksc[2] == 1); auto s_copy = s; cute::get<0,0>(s_copy) = stride_ksc[0]; cute::get<1,1>(s_copy) = stride_ksc[1]; return s_copy; } // Filter cutlass::layout::TensorNHWC -> rank-2 stride (k, (_1, s, r)) template <class IntT> CUTLASS_HOST_DEVICE cute::Stride<IntT, cute::Stride<cute::Int<1>, IntT, IntT>> make_cute_packed_stride( cute::Stride<IntT, cute::Stride<cute::Int<1>, IntT, IntT>> s, cute::array<IntT, 4> stride_krsc, conv::Operator ConvOp) { static_assert(std::is_integral_v<IntT>, "Stride must have an integral type so it can be set dynamically. 
Static strides not supported."); assert(stride_krsc[3] == 1); auto s_copy = s; cute::get<0,0>(s_copy) = stride_krsc[0]; cute::for_each(cute::make_seq<2>{}, [&](auto i) { cute::get<1,2-i>(s_copy) = stride_krsc[i+1]; }); return s_copy; } // Filter cutlass::layout::TensorNDHWC -> rank-2 stride (k, (_1, s, r, t)) template <class IntT> CUTLASS_HOST_DEVICE cute::Stride<IntT, cute::Stride<cute::Int<1>, IntT, IntT, IntT>> make_cute_packed_stride( cute::Stride<IntT, cute::Stride<cute::Int<1>, IntT, IntT, IntT>> s, cute::array<IntT, 5> stride_ktrsc, conv::Operator ConvOp) { static_assert(std::is_integral_v<IntT>, "Stride must have an integral type so it can be set dynamically. Static strides not supported."); assert(stride_ktrsc[4] == 1); auto s_copy = s; cute::get<0,0>(s_copy) = stride_ktrsc[0]; cute::for_each(cute::make_seq<3>{}, [&](auto i) { cute::get<1,3-i>(s_copy) = stride_ktrsc[i+1]; }); return s_copy; } // // Activation tensor (_1, (w, h, d, n)) for wgrad kernel // // It is also Filter tensor ((_1), (k, s, r, t)) for dgrad kernel // // Activation cutlass::layout::TensorNWC -> rank-2 stride (_1, (W,N)) in wgrad // Filter cutlass::layout::TensorNWC -> rank-2 stride ((_1), (k, s)) in dgrad template <class IntT> CUTLASS_HOST_DEVICE cute::Stride<cute::Int<1>, cute::Stride<IntT, IntT>> make_cute_packed_stride( cute::Stride<cute::Int<1>, cute::Stride<IntT, IntT>> s, cute::array<IntT, 3> stride_nwc, conv::Operator ConvOp) { static_assert(std::is_integral_v<IntT>, "Stride must have an integral type so it can be set dynamically. Static strides not supported."); assert(stride_nwc[2] == 1); auto s_copy = s; if (ConvOp == cutlass::conv::Operator::kWgrad) { cute::get<1,0>(s_copy) = stride_nwc[1]; cute::get<1,1>(s_copy) = stride_nwc[0]; } else if (ConvOp == cutlass::conv::Operator::kDgrad) { // stride_nwc in dgrad is ksc. cute::get<1,0>(s_copy) = stride_nwc[0]; cute::get<1,1>(s_copy) = stride_nwc[1]; } return s_copy; } // Activation cutlass::layout::TensorNHWC -> rank-2 stride (_1, (W,H,N)) in wgrad // Filter cutlass::layout::TensorNHWC -> rank-2 stride ((_1), (k, s, r)) in dgrad template <class IntT> CUTLASS_HOST_DEVICE cute::Stride<cute::Int<1>, cute::Stride<IntT, IntT, IntT>> make_cute_packed_stride( cute::Stride<cute::Int<1>, cute::Stride<IntT, IntT, IntT>> s, cute::array<IntT, 4> stride_nhwc, conv::Operator ConvOp) { static_assert(std::is_integral_v<IntT>, "Stride must have an integral type so it can be set dynamically. Static strides not supported."); assert(stride_nhwc[3] == 1); auto s_copy = s; if (ConvOp == cutlass::conv::Operator::kWgrad) { cute::for_each(cute::make_seq<3>{}, [&](auto i) { cute::get<1,i>(s_copy) = stride_nhwc[2-i]; }); } else if (ConvOp == cutlass::conv::Operator::kDgrad) { // stride_nhwc in dgrad is krsc. cute::get<1,0>(s_copy) = stride_nhwc[0]; cute::for_each(cute::make_seq<2>{}, [&](auto i) { cute::get<1,2-i>(s_copy) = stride_nhwc[i+1]; }); } return s_copy; } // Activation cutlass::layout::TensorNDHWC -> rank-2 stride (_1, (W,H,D,N)) in wgrad // Filter cutlass::layout::TensorNDHWC -> rank-2 stride ((_1), (k, s, r, t)) in dgrad template <class IntT> CUTLASS_HOST_DEVICE cute::Stride<cute::Int<1>, cute::Stride<IntT, IntT, IntT, IntT>> make_cute_packed_stride( cute::Stride<cute::Int<1>, cute::Stride<IntT, IntT, IntT, IntT>> s, cute::array<IntT, 5> stride_ndhwc, conv::Operator ConvOp) { static_assert(std::is_integral_v<IntT>, "Stride must have an integral type so it can be set dynamically. 
Static strides not supported."); assert(stride_ndhwc[4] == 1); auto s_copy = s; if (ConvOp == cutlass::conv::Operator::kWgrad) { cute::for_each(cute::make_seq<4>{}, [&](auto i) { cute::get<1,i>(s_copy) = stride_ndhwc[3-i]; }); } else if (ConvOp == cutlass::conv::Operator::kDgrad) { // stride_ndhwc in dgrad is ktrsc. cute::get<1,0>(s_copy) = stride_ndhwc[0]; cute::for_each(cute::make_seq<3>{}, [&](auto i) { cute::get<1,3-i>(s_copy) = stride_ndhwc[i+1]; }); } return s_copy; } // // NZPQ tensor (_1, nzpq) for wgrad kernel // // cutlass::layout::TensorNWC -> rank-2 stride (_1, nzpq) template <class IntT> CUTLASS_HOST_DEVICE cute::Stride<cute::Int<1>, IntT> make_cute_packed_stride( cute::Stride<cute::Int<1>, IntT> s, cute::array<IntT, 3> stride_nqk, conv::Operator ConvOp) { static_assert(std::is_integral_v<IntT>, "Stride must have an integral type so it can be set dynamically. Static strides not supported."); assert(stride_nqk[2] == 1); auto s_copy = s; cute::get<1>(s_copy) = stride_nqk[1]; return s_copy; } // cutlass::layout::TensorNHWC -> rank-2 stride (_1, nzpq) template <class IntT> CUTLASS_HOST_DEVICE cute::Stride<cute::Int<1>, IntT> make_cute_packed_stride( cute::Stride<cute::Int<1>, IntT> s, cute::array<IntT, 4> stride_npqk, conv::Operator ConvOp) { static_assert(std::is_integral_v<IntT>, "Stride must have an integral type so it can be set dynamically. Static strides not supported."); assert(stride_npqk[3] == 1); auto s_copy = s; cute::get<1>(s_copy) = stride_npqk[2]; return s_copy; } // cutlass::layout::TensorNDHWC -> rank-2 stride (_1, nzpq) template <class IntT> CUTLASS_HOST_DEVICE cute::Stride<cute::Int<1>, IntT> make_cute_packed_stride( cute::Stride<cute::Int<1>, IntT> s, cute::array<IntT, 5> stride_nzpqk, conv::Operator ConvOp) { static_assert(std::is_integral_v<IntT>, "Stride must have an integral type so it can be set dynamically. Static strides not supported."); assert(stride_nzpqk[4] == 1); auto s_copy = s; cute::get<1>(s_copy) = stride_nzpqk[3]; return s_copy; } // // Wgrad output tensor (k, (_1, s, r, t), _0) // // Filter cutlass::layout::TensorKCS -> rank-3 stride (k, (_1, s), _0) template <class IntT> CUTLASS_HOST_DEVICE cute::Stride<IntT, cute::Stride<cute::Int<1>, IntT>, cute::Int<0>> make_cute_packed_stride( cute::Stride<IntT, cute::Stride<cute::Int<1>, IntT>, cute::Int<0>> s, [[maybe_unused]] cute::array<int32_t, 3> shape_output, cute::array<IntT, 3> stride_ksc, conv::Operator ConvOp) { static_assert(std::is_integral_v<IntT>, "Stride must have an integral type so it can be set dynamically. Static strides not supported."); assert(stride_ksc[2] == 1); auto s_copy = s; cute::get<0,0>(s_copy) = stride_ksc[0]; cute::get<1,1>(s_copy) = stride_ksc[1]; return s_copy; } // Filter cutlass::layout::TensorKCSR -> rank-3 stride (k, (_1, s, r), _0) template <class IntT> cute::Stride<IntT, cute::Stride<cute::Int<1>, IntT, IntT>, cute::Int<0>> make_cute_packed_stride( cute::Stride<IntT, cute::Stride<cute::Int<1>, IntT, IntT>, cute::Int<0>> s, [[maybe_unused]] cute::array<int32_t, 4> shape_output, cute::array<IntT, 4> stride_krsc, conv::Operator ConvOp) { static_assert(std::is_integral_v<IntT>, "Stride must have an integral type so it can be set dynamically. 
Static strides not supported."); assert(stride_krsc[3] == 1); auto s_copy = s; cute::get<0,0>(s_copy) = stride_krsc[0]; cute::for_each(cute::make_seq<2>{}, [&](auto i) { cute::get<1,2-i>(s_copy) = stride_krsc[i+1]; }); return s_copy; } // Filter cutlass::layout::TensorKCSRT -> rank-3 stride (k, (_1, s, r, t), _0) template <class IntT> CUTLASS_HOST_DEVICE cute::Stride<IntT, cute::Stride<cute::Int<1>, IntT, IntT, IntT>, cute::Int<0>> make_cute_packed_stride( cute::Stride<IntT, cute::Stride<cute::Int<1>, IntT, IntT, IntT>, cute::Int<0>> s, [[maybe_unused]] cute::array<int32_t, 5> shape_output, cute::array<IntT, 5> stride_ktrsc, conv::Operator ConvOp) { static_assert(std::is_integral_v<IntT>, "Stride must have an integral type so it can be set dynamically. Static strides not supported."); assert(stride_ktrsc[4] == 1); auto s_copy = s; cute::get<0,0>(s_copy) = stride_ktrsc[0]; cute::for_each(cute::make_seq<3>{}, [&](auto i) { cute::get<1,3-i>(s_copy) = stride_ktrsc[i+1]; }); return s_copy; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass
cutlass/tools/util/include/cutlass/util/packed_stride.hpp/0
{ "file_path": "cutlass/tools/util/include/cutlass/util/packed_stride.hpp", "repo_id": "cutlass", "token_count": 7277 }
62
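The packed_stride.hpp helpers above are typically called when populating CUTLASS 3.x GEMM arguments: the stride type encodes layout and batch mode, and make_cute_packed_stride fills in the dynamic leading dimension and batch stride from the problem shape. Below is a minimal host-side sketch added for illustration; the problem sizes are arbitrary, and the stride typedefs are assumptions chosen to match the batched (M,K,L)/(N,K,L)/(M,N,L) overloads shown above rather than any particular kernel.

#include <cstdint>

#include "cutlass/cutlass.h"
#include "cutlass/util/packed_stride.hpp"

int main() {
  int M = 512, N = 256, K = 128, L = 4;   // L is the batch count

  // Row-major A (M x K), column-major B (K x N), row-major C/D (M x N), batched.
  using StrideA = cute::Stride<int64_t, cute::Int<1>, int64_t>;
  using StrideB = cute::Stride<int64_t, cute::Int<1>, int64_t>;
  using StrideC = cute::Stride<int64_t, cute::Int<1>, int64_t>;

  // Each call packs the leading dimension from the shape and sets the batch
  // stride to the size of one matrix (or 0 when L == 1, as in the code above).
  StrideA stride_A = cutlass::make_cute_packed_stride(StrideA{}, cute::make_shape(M, K, L));
  StrideB stride_B = cutlass::make_cute_packed_stride(StrideB{}, cute::make_shape(N, K, L));
  StrideC stride_C = cutlass::make_cute_packed_stride(StrideC{}, cute::make_shape(M, N, L));

  // stride_A is now (K, _1, M*K); these values feed the kernel arguments
  // alongside the corresponding device pointers.
  (void)stride_A; (void)stride_B; (void)stride_C;
  return 0;
}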
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cmath> #include "cutlass/cutlass.h" #include "cutlass/complex.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" #include "cutlass/tensor_view.h" #include "cutlass/util/device_memory.h" #include "cutlass/util/reference/detail/linear_to_coordinate.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace reference { namespace device { ///////////////////////////////////////////////////////////////////////////////////////////////// namespace kernel { template < typename Element, typename Layout, typename ComputeType, typename ReduceOp, typename TransformOp, int kBlockSize = 128 > __global__ void TensorTransformReducePartial( TensorView<Element, Layout> view, /// View of the tensor to reduce over ComputeType identity, /// Identity element of the reduction operation ReduceOp reduce, /// Reduces an accumulated value with a transformed element: f(ComputeType, ComputeType) => ComputeType TransformOp transform, /// Transforms the tensor element to ComputeType: g(Element) => ComputeType ComputeType *workspace) { /// Device-side workspace for accumulating partial results. 
The reduced element is stored in workspace[0] int64_t idx = threadIdx.x + blockIdx.x * blockDim.x; int64_t size = view.size(); __shared__ ComputeType scratchpad[kBlockSize]; for (; idx < size; idx += blockDim.x * gridDim.x) { // Map linear thread ID onto tensor coordinate typename Layout::TensorCoord coord; cutlass::reference::detail::LinearToCoordinate<Layout::kRank>()(coord, idx, view.extent()); if (view.contains(coord)) { // Fetch element Element x = view.at(coord); // Transform identity = reduce(identity, transform(x)); } } scratchpad[threadIdx.x] = identity; __syncthreads(); // One thread performs the final reduction and stores out. This could be enhanced via // a tree reduction and pipelining. if (threadIdx.x == 0) { for (int i = 1; i < kBlockSize; ++i) { identity = reduce(identity, scratchpad[i]); } workspace[blockIdx.x] = identity; } } template < typename Element, typename Layout, typename ComputeType, typename ReduceOp, typename TransformOp, int kBlockSize = 128 > __global__ void TensorTransformReducePartial( TensorView<Element, Layout> view_A, /// View of the tensor to reduce over TensorView<Element, Layout> view_B, /// View of the tensor to reduce over ComputeType identity, /// Identity element of the reduction operation ReduceOp reduce, /// Reduces an accumulated value with a transformed element: f(ComputeType, ComputeType) => ComputeType TransformOp transform, /// Transforms the tensor element to ComputeType: g(Element) => ComputeType ComputeType *workspace) { /// Device-side workspace for accumulating partial results. The reduced element is stored in workspace[0] int64_t idx = threadIdx.x + blockIdx.x * blockDim.x; auto size = static_cast<int64_t>(view_A.size()); __shared__ ComputeType scratchpad[kBlockSize]; for (; idx < size; idx += blockDim.x * gridDim.x) { // Map linear thread ID onto tensor coordinate typename Layout::TensorCoord coord; cutlass::reference::detail::LinearToCoordinate<Layout::kRank>()(coord, idx, view_A.extent()); if (view_A.contains(coord)) { // Fetch element Element a = view_A.at(coord); Element b = view_B.at(coord); // Transform identity = reduce(identity, transform(a, b)); } } scratchpad[threadIdx.x] = identity; __syncthreads(); // One thread performs the final reduction and stores out. This could be enhanced via // a tree reduction and pipelining. 
if (threadIdx.x == 0) { for (int i = 1; i < kBlockSize; ++i) { identity = reduce(identity, scratchpad[i]); } workspace[blockIdx.x] = identity; } } template < typename ComputeType, typename ReduceOp, int kBlockSize = 32 > __global__ void TensorTransformReduceFinalize( ComputeType *workspace, ComputeType identity, int workspace_size, ReduceOp reduce) { __shared__ ComputeType scratchpad[kBlockSize]; for (int idx = threadIdx.x; idx < workspace_size; idx += kBlockSize) { identity = reduce(identity, workspace[idx]); } scratchpad[threadIdx.x] = identity; __syncthreads(); if (threadIdx.x == 0) { for (int i = 1; i < kBlockSize; ++i) { identity = reduce(identity, scratchpad[i]); } workspace[0] = identity; } } } // namespace kernel ///////////////////////////////////////////////////////////////////////////////////////////////// /// Transform-reduce operation over the elements of a tensor template < typename Element, typename Layout, typename ComputeType, typename ReduceOp, typename TransformOp > ComputeType TensorTransformReduce( TensorView<Element, Layout> view, /// View of the tensor to reduce over ComputeType identity, /// Identity element of the reduction operation ReduceOp reduce, /// Reduces an accumulated value with a transformed element: f(ComputeType, ComputeType) => ComputeType TransformOp transform, /// Transforms the tensor element to ComputeType: g(Element) => ComputeType ComputeType *workspace, /// Device-side workspace for accumulating partial results. The reduced element is stored in workspace[0] int workspace_size, /// Number of elements in workspace cudaStream_t stream = nullptr, /// CUDA stream to launch into bool copy_out = true /// If true, the value of workspace[0] is copied to host and returned. Otherwise, `identity` is returned. ) { int const kBlockSize = 128; dim3 block(kBlockSize, 1); dim3 grid(workspace_size, 1); kernel::TensorTransformReducePartial< Element, Layout, ComputeType, ReduceOp, TransformOp, kBlockSize ><<< grid, block, 0, stream >>>( view, identity, reduce, transform, workspace ); int const kFinalizeBlockSize = 32; kernel::TensorTransformReduceFinalize< ComputeType, ReduceOp, kFinalizeBlockSize ><<< dim3(1, 1), dim3(kFinalizeBlockSize, 1), 0, stream >>>( workspace, identity, workspace_size, reduce ); if (copy_out) { cudaError_t result = cudaMemcpy(&identity, workspace, sizeof(identity), cudaMemcpyDeviceToHost); if (result != cudaSuccess) { throw std::runtime_error("cudaMemcpy() failed"); } } return identity; } /// Transform-reduce operation over the elements of two tensors, zipped together template < typename Element, typename Layout, typename ComputeType, typename ReduceOp, typename TransformOp > ComputeType TensorTransformReduce( TensorView<Element, Layout> view_A, /// View of the tensor to reduce over TensorView<Element, Layout> view_B, /// View of the tensor to reduce over ComputeType identity, /// Identity element of the reduction operation ReduceOp reduce, /// Reduces an accumulated value with a transformed element: f(ComputeType, ComputeType) => ComputeType TransformOp transform, /// Transforms the tensor element to ComputeType: g(Element) => ComputeType ComputeType *workspace, /// Device-side workspace for accumulating partial results. The reduced element is stored in workspace[0] int workspace_size, /// Number of elements in workspace cudaStream_t stream = nullptr, /// CUDA stream to launch into bool copy_out = true /// If true, the value of workspace[0] is copied to host and returned. Otherwise, `identity` is returned. 
) { if (view_A.extent() != view_B.extent()) { throw std::runtime_error("Extents must be equal."); } int const kBlockSize = 128; dim3 block(kBlockSize, 1); dim3 grid(workspace_size, 1); kernel::TensorTransformReducePartial< Element, Layout, ComputeType, ReduceOp, TransformOp, kBlockSize ><<< grid, block, 0, stream >>>( view_A, view_B, identity, reduce, transform, workspace ); int const kFinalizeBlockSize = 32; kernel::TensorTransformReduceFinalize< ComputeType, ReduceOp, kFinalizeBlockSize ><<< dim3(1, 1), dim3(kFinalizeBlockSize, 1), 0, stream >>>( workspace, identity, workspace_size, reduce ); if (copy_out) { cudaError_t result = cudaMemcpy(&identity, workspace, sizeof(identity), cudaMemcpyDeviceToHost); if (result != cudaSuccess) { throw std::runtime_error("cudaMemcpy() failed"); } } return identity; } /// Transform-reduce operation over the elements of a tensor. This helper allocates the device-side /// workspace template < typename Element, typename Layout, typename ComputeType, typename ReduceOp, typename TransformOp > ComputeType TensorTransformReduce( TensorView<Element, Layout> view, ComputeType identity, ReduceOp reduce, TransformOp transform, cudaStream_t stream = nullptr, int workspace_size = 0 ) { // Optionally query for the SM count to size the workspace. if (!workspace_size) { int device_idx = 0; cudaDeviceProp prop; cudaError_t result = cudaGetDevice(&device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDevice() failed"); } result = cudaGetDeviceProperties(&prop, device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDeviceProp() failed"); } workspace_size = int(prop.multiProcessorCount); } DeviceAllocation<ComputeType> workspace(workspace_size); ComputeType output = TensorTransformReduce( view, identity, reduce, transform, workspace.get(), workspace_size, stream, true); return output; } /// Transform-reduce operation over the elements of a tensor. This helper allocates the device-side /// workspace template < typename Element, typename Layout, typename ComputeType, typename ReduceOp, typename TransformOp > ComputeType TensorTransformReduce( TensorView<Element, Layout> view_A, TensorView<Element, Layout> view_B, ComputeType identity, ReduceOp reduce, TransformOp transform, cudaStream_t stream = nullptr, int workspace_size = 0 ) { // Optionally query for the SM count to size the workspace. 
if (!workspace_size) { int device_idx = 0; cudaDeviceProp prop; cudaError_t result = cudaGetDevice(&device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDevice() failed"); } result = cudaGetDeviceProperties(&prop, device_idx); if (result != cudaSuccess) { throw std::runtime_error("cudaGetDeviceProp() failed"); } workspace_size = int(prop.multiProcessorCount); } DeviceAllocation<ComputeType> workspace(workspace_size); ComputeType output = TensorTransformReduce( view_A, view_B, identity, reduce, transform, workspace.get(), workspace_size, stream, true); return output; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Helper to compute the sum of the elements of a tensor template < typename Element, typename Layout, typename ComputeType = Element > ComputeType TensorSum( TensorView<Element, Layout> view, ComputeType identity = ComputeType(), cudaStream_t stream = nullptr, int workspace_size = 0 ) { plus<ComputeType> reduce; NumericConverter<ComputeType, Element> transform; return TensorTransformReduce( view, identity, reduce, transform, stream, workspace_size); } /// Helper to compute the sum of the squares of the elements of a tensor template < typename Element, typename Layout, typename ComputeType = Element > ComputeType TensorSumSq( TensorView<Element, Layout> view, ComputeType identity = ComputeType(), cudaStream_t stream = nullptr, int workspace_size = 0 ) { plus<ComputeType> reduce; magnitude_squared<Element, ComputeType> transform; return TensorTransformReduce( view, identity, reduce, transform, stream, workspace_size); } /// Helper to compute the norm of the elements of a tensor. template < typename Element, typename Layout, typename ComputeType = double > ComputeType TensorNorm( TensorView<Element, Layout> view, ComputeType identity = ComputeType(), cudaStream_t stream = nullptr, int workspace_size = 0 ) { return std::sqrt(TensorSumSq(view, identity, stream, workspace_size)); } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Helper to compute the sum of the squares of the differences of two tensors template < typename Element, typename Layout, typename ComputeType = double > ComputeType TensorSumSqDiff( TensorView<Element, Layout> view_A, TensorView<Element, Layout> view_B, ComputeType identity = ComputeType(), cudaStream_t stream = nullptr, int workspace_size = 0 ) { plus<ComputeType> reduce; magnitude_squared_difference<Element, ComputeType> transform; return TensorTransformReduce( view_A, view_B, identity, reduce, transform, stream, workspace_size); } /// Helper to compute the norm of the tensor computed as the difference of two tensors in memory template < typename Element, typename Layout, typename ComputeType = double > ComputeType TensorNormDiff( TensorView<Element, Layout> view_A, TensorView<Element, Layout> view_B, ComputeType identity = ComputeType(), cudaStream_t stream = nullptr, int workspace_size = 0 ) { return std::sqrt(TensorSumSqDiff(view_A, view_B, identity, stream, workspace_size)); } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace device } // namespace reference } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/tools/util/include/cutlass/util/reference/device/tensor_reduce.h/0
{ "file_path": "cutlass/tools/util/include/cutlass/util/reference/device/tensor_reduce.h", "repo_id": "cutlass", "token_count": 5189 }
63
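The reducers above are most often used to compare a computed tensor against a reference by forming a relative error from TensorNorm and TensorNormDiff. The sketch below is an added illustration, assuming a CUDA-capable device; it relies on HostTensor and the device-side TensorFillRandomUniform from tensor_fill.h, and the extents, seed, and value range are arbitrary.

#include <iostream>

#include "cutlass/layout/matrix.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_reduce.h"

int main() {
  cutlass::HostTensor<float, cutlass::layout::RowMajor> computed({128, 256});
  cutlass::HostTensor<float, cutlass::layout::RowMajor> reference({128, 256});

  // Fill both tensors on the device; identical seeds make the difference zero.
  cutlass::reference::device::TensorFillRandomUniform(computed.device_view(), 2024, 4.0, -4.0);
  cutlass::reference::device::TensorFillRandomUniform(reference.device_view(), 2024, 4.0, -4.0);

  // Both reductions accumulate in double precision (the default ComputeType) and
  // size their workspace from the device's SM count, as in the helpers above.
  double norm      = cutlass::reference::device::TensorNorm(computed.device_view());
  double norm_diff = cutlass::reference::device::TensorNormDiff(computed.device_view(),
                                                                reference.device_view());

  double relative_error = (norm > 0.0) ? norm_diff / norm : norm_diff;
  std::cout << "||computed|| = " << norm
            << ", relative error vs. reference = " << relative_error << std::endl;
  return 0;
}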
var searchData= [ ['xor_5fadd',['xor_add',['../structcutlass_1_1xor__add.html',1,'cutlass']]] ];
cutlass/docs/search/all_17.js/0
{ "file_path": "cutlass/docs/search/all_17.js", "repo_id": "cutlass", "token_count": 48 }
0
var searchData= [ ['xor_5fadd',['xor_add',['../structcutlass_1_1xor__add.html',1,'cutlass']]] ];
cutlass/docs/search/classes_15.js/0
{ "file_path": "cutlass/docs/search/classes_15.js", "repo_id": "cutlass", "token_count": 48 }
1
var searchData= [ ['kernellaunchconfiguration',['KernelLaunchConfiguration',['../structcutlass_1_1KernelLaunchConfiguration.html',1,'cutlass']]] ];
cutlass/docs/search/classes_9.js/0
{ "file_path": "cutlass/docs/search/classes_9.js", "repo_id": "cutlass", "token_count": 48 }
2
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief CUTLASS layout visualization example */ #pragma once #include <algorithm> #include <stdexcept> #include <vector> #include "cutlass/coord.h" #include "cutlass/util/reference/host/tensor_foreach.h" #include "register_layout.h" ///////////////////////////////////////////////////////////////////////////////////////////////// /// Permits copying dynamic vectors into static-length vectors template <typename TensorCoord, int Rank> struct vector_to_coord { vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) { coord[Rank - 1] = vec.at(Rank - 1); if (Rank > 1) { vector_to_coord<TensorCoord, Rank - 1>(coord, vec); } } }; /// Permits copying dynamic vectors into static-length vectors template <typename TensorCoord> struct vector_to_coord<TensorCoord, 1> { vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) { coord[0] = vec.at(0); } }; /// Permits copying dynamic vectors into static-length vectors template <typename TensorCoord> struct vector_to_coord<TensorCoord, 0> { vector_to_coord(TensorCoord &coord, std::vector<int> const &vec) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> std::ostream &operator<<(std::ostream &out, std::vector<T> const &vec) { auto it = vec.begin(); if (it != vec.end()) { out << *it; for (++it; it != vec.end(); ++it) { out << ", " << *it; } } return out; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Permits copying static-length vectors into dynamic vectors template <typename TensorCoord, int Rank> struct coord_to_vector { coord_to_vector(std::vector<int> &vec, TensorCoord const &coord) { vec.at(Rank - 1) = coord[Rank - 1]; coord_to_vector<TensorCoord, Rank - 1>(vec, coord); } }; /// Permits copying static-length vectors into dynamic vectors template <typename TensorCoord> struct coord_to_vector<TensorCoord, 1> { coord_to_vector(std::vector<int> &vec, TensorCoord const &coord) { vec.at(0) = coord[0]; } }; /// Permits copying static-length vectors into dynamic vectors template <typename TensorCoord> struct coord_to_vector<TensorCoord, 0> { coord_to_vector(std::vector<int> &vec, TensorCoord const &coord) { } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure representing an element in source memory struct Element { std::vector<int> coord; ///< logical coordinate of element (as vector) int offset; ///< linear offset from source memory int color; ///< enables coloring each element to indicate /// Default ctor inline Element(): offset(-1), color(0) { } /// Construct from logical coordinate and initial offset inline Element( std::vector<int> const &coord_, int offset_, int color_ = 0 ): coord(coord_), offset(offset_), color(color_) { } /// Returns true if element is in a defined state inline bool valid() const { return offset >= 0; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Visualizes memory layouts by constructing a 'shape' template <typename Layout_> class VisualizeLayout : public VisualizeLayoutBase { public: using Layout = Layout_; using TensorCoord = typename Layout::TensorCoord; using Stride = typename Layout::Stride; public: Options options; Layout layout; TensorCoord extent; std::vector<Element> elements; public: /// Initializes the problem space VisualizeLayout() { } /// visualization method bool visualize(Options const &options_) { options = options_; if 
(options.extent.size() != TensorCoord::kRank) { std::cerr << "--extent must have rank " << TensorCoord::kRank << " (given: " << options.extent.size() << ")" << std::endl; return false; } vector_to_coord<TensorCoord, TensorCoord::kRank>(extent, options.extent); // Construct the layout for a packed tensor if (options.stride.empty()) { layout = Layout::packed(extent); } else if (options.stride.size() != Stride::kRank) { std::cerr << "--stride must have rank " << Stride::kRank << " (given: " << options.stride.size() << ")" << std::endl; return false; } else { // Stride from Stride stride; vector_to_coord<Stride, Stride::kRank>(stride, options.stride); layout = Layout(stride); } // Resize elements, setting elements to 'undefined' state elements.resize(layout.capacity(extent)); // enumerate points in tensor space and assign cutlass::reference::host::TensorForEachLambda( extent, [&](TensorCoord coord) { std::vector<int> coord_vec(TensorCoord::kRank, 0); coord_to_vector<TensorCoord, TensorCoord::kRank>(coord_vec, coord); int offset = int(layout(coord)); if (offset >= int(elements.size())) { std::cerr << "Layout error - " << coord_vec << " is out of range (computed offset: " << offset << ", capacity: " << elements.size() << std::endl; throw std::out_of_range("(TensorForEach) layout error - coordinate out of range"); } elements.at(offset) = Element(coord_vec, offset); }); return true; } /// Verifies the layout satisfies vectorization requirements bool verify(bool verbose, std::ostream &out) { return true; } private: /// returns a pair (is_vectorizable, one_changing_rank) to determine if a /// vector exists (consecutive logical coordinates or uniformly invalid) /// at the given location. std::pair< bool, int > _is_vectorizable(int i) const { // (all elements are invalid) or // (all elements are valid AND // exactly one rank is changing AND // elements are consecutive) // Don't need vectorization. if (options.vectorize <= 2) return std::make_pair(false, -1); // Boundary check. if (i > int(elements.size()) || (i + options.vectorize - 1) > int(elements.size())) return std::make_pair(false, -1); // Check if either all elements are valid or invalid. bool all_elements_invalid = std::all_of( elements.begin() + i, elements.begin() + i + options.vectorize, [](Element const &e) { return !e.valid(); }); bool all_elements_valid = std::all_of( elements.begin() + i, elements.begin() + i + options.vectorize, [](Element const &e) { return e.valid(); }); if (!all_elements_invalid && !all_elements_valid) return std::make_pair(false, -1); // From here, it is vectorizable. if (all_elements_invalid) return std::make_pair(true, -1); // Check if only exactly one rank is changing. int one_changing_rank = -1; for (int j = 0; j < options.vectorize; ++j) { for (int r = 0; r < TensorCoord::kRank; ++r) { if (elements.at(i + j).coord.at(r) != elements.at(i).coord.at(r)) { if (one_changing_rank == -1) { one_changing_rank = r; } else if (one_changing_rank != r) { return std::make_pair(false, -1); } } } } return std::make_pair(true, one_changing_rank); } /// Prints a vector of elements void _print_vector(std::ostream &out, int i, int one_changing_rank) { Element const &base_element = elements.at(i); if (base_element.valid()) { out << "("; for (int r = 0; r < TensorCoord::kRank; ++r) { if (r) { out << ", "; } if (r == one_changing_rank) { out << base_element.coord.at(r) << ".." 
<< (base_element.coord.at(r) + options.vectorize - 1); } else { out << base_element.coord.at(r); } } out << ")"; } else { out << " "; } } /// Prints a single element void _print_element(std::ostream &out, int k) { Element const &element = elements.at(k); if (element.valid()) { out << "("; for (int v = 0; v < TensorCoord::kRank; ++v) { out << (v ? ", " : "") << element.coord.at(v); } out << ")"; } else { out << " "; } } public: /// Pretty-prints the layout to the console void print_csv(std::ostream &out, char delim = '|', char new_line = '\n') { int row = -1; for (int i = 0; i < int(elements.size()); i += options.vectorize) { if (i % options.output_shape.at(0)) { out << delim; } else { if (row >= 0) { out << new_line; } ++row; if (row == options.output_shape.at(1)) { out << new_line; row = 0; } } auto is_vector = _is_vectorizable(i); if (is_vector.first) { _print_vector(out, i, is_vector.second); // print a vector starting at element i } else { for (int j = 0; j < options.vectorize; ++j) { // print individual elements [i..i+j) _print_element(out, i + j); } } } out << new_line << std::flush; } /// Help message virtual std::ostream &print_help(std::ostream &out) { out << "TensorCoord rank " << TensorCoord::kRank << ", Stride rank: " << Stride::kRank; return out; } }; /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/examples/03_visualize_layout/visualize_layout.h/0
{ "file_path": "cutlass/examples/03_visualize_layout/visualize_layout.h", "repo_id": "cutlass", "token_count": 4166 }
3
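At its core, the visualize() method above walks the logical coordinate space with TensorForEachLambda and records the linear offset the chosen layout assigns to each coordinate. The stand-alone host sketch below reproduces that idea for illustration only; it is not part of the example tool, and the interleaved layout and 4x8 extent are arbitrary choices.

#include <iostream>
#include <vector>

#include "cutlass/layout/matrix.h"
#include "cutlass/util/reference/host/tensor_foreach.h"

int main() {
  using Layout = cutlass::layout::ColumnMajorInterleaved<4>;

  cutlass::MatrixCoord extent(4, 8);
  Layout layout = Layout::packed(extent);

  // One slot per storage location; -1 marks locations no coordinate maps to.
  std::vector<int> logical_id(static_cast<size_t>(layout.capacity(extent)), -1);

  // Visit every (row, column) coordinate and record which storage offset it lands in.
  cutlass::reference::host::TensorForEachLambda(
    extent,
    [&](cutlass::MatrixCoord coord) {
      logical_id.at(static_cast<size_t>(layout(coord))) =
        coord.row() * extent.column() + coord.column();
    });

  // Print the logical element held at each consecutive storage offset, 8 per row.
  for (size_t i = 0; i < logical_id.size(); ++i) {
    std::cout << logical_id[i] << ((i % 8 == 7) ? '\n' : ' ');
  }
  return 0;
}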
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Planar Complex Array Example This example demonstrates the CUTLASS Library's exposure of planar complex GEMM kernels which execute a batch of matrix products, loading problem sizes and matrix base pointers from arrays in global memory. These kernels represent complex matrices by storing the real and imaginary parts of the matrix in disjoint regions in memory. These real-valued matrices are stored using existing cuBLAS layouts as either column-major or row-major layouts with a single leading dimension indicating the stride between columns or rows. The CUTLASS Library collects multiple template instantiations in a data structure and offers a BLAS-like dispatch API to invoke the appropriate kernel on the Volta or Turing architectures. CUTLASS decouples matrix layout from complex transformation, so four possible transformations are possible on the A and B operands: n: column-major c: column-major complex conjugate t: row-major h: row-major complex conjugate To build strictly the planar complex kernels needed for general application, execute the following CMake command in an empty build directory. $ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" \ -DCUTLASS_LIBRARY_KERNELS=cutlass_tensorop_*gemm_planar_complex This builds all planar complex GEMM variants for Volta and Turing architectures. To build strictly the kernels needed for this example, an even narrower filter string may be specified as follows. This only builds planar complex GEMMs targeting Tensor Cores for the 'CN' layout configuration (conjugate A operand with both A and B as column-major). $ cmake .. 
-DCUTLASS_NVCC_ARCHS="70;75;80" \ -DCUTLASS_LIBRARY_KERNELS=cutlass_tensorop_f16_s*gemm_planar_complex_array_f16*cn $ make 11_planar_complex_array $ ./examples/11_planar_complex_array/11_planar_complex_array --m=2048 --n=1024 --k=512 --batch=10 */ #include <iostream> #include <sstream> #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/util/command_line.h" #include "cutlass/util/distribution.h" #include "cutlass/util/device_memory.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/host_tensor_planar_complex.h" #include "cutlass/util/reference/device/tensor_fill.h" #include "cutlass/util/reference/device/gemm_planar_complex.h" #include "cutlass/util/reference/device/tensor_compare.h" #include "cutlass/library/handle.h" ///////////////////////////////////////////////////////////////////////////////////////////////// /// Result structure struct Result { double runtime_ms; double gflops; cutlass::Status status; cudaError_t error; bool passed; // // Methods // Result( double runtime_ms = 0, double gflops = 0, cutlass::Status status = cutlass::Status::kSuccess, cudaError_t error = cudaSuccess ): runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { } }; /////////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; cutlass::gemm::GemmCoord problem_size; int batch_count; cutlass::complex<float> alpha; cutlass::complex<float> beta; bool reference_check; int iterations; Options(): help(false), problem_size({1024, 1024, 1024}), batch_count(1), reference_check(true), iterations(20), alpha(1), beta() { } bool valid() { return true; } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } cmd.get_cmd_line_argument("m", problem_size.m()); cmd.get_cmd_line_argument("n", problem_size.n()); cmd.get_cmd_line_argument("k", problem_size.k()); cmd.get_cmd_line_argument("batch", batch_count); cmd.get_cmd_line_argument("alpha", alpha.real()); cmd.get_cmd_line_argument("alpha_i", alpha.imag()); cmd.get_cmd_line_argument("beta", beta.real()); cmd.get_cmd_line_argument("beta_i", beta.imag()); cmd.get_cmd_line_argument("iterations", iterations); } /// Prints the usage statement. 
std::ostream & print_usage(std::ostream &out) const { out << "11_planar_complex_array example\n\n" << " This example uses the CUTLASS Library to execute Planar Complex Array GEMM computations.\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --m=<int> GEMM M dimension\n" << " --n=<int> GEMM N dimension\n" << " --k=<int> GEMM K dimension\n" << " --batch=<int> Number of GEMM operations executed in one batch\n" << " --alpha=<f32> Epilogue scalar alpha (real part)\n" << " --alpha_i=<f32> Epilogue scalar alpha (imaginary part)\n" << " --beta=<f32> Epilogue scalar beta (real part)\n\n" << " --beta_i=<f32> Epilogue scalar beta (imaginary part)\n\n" << " --iterations=<int> Number of profiling iterations to perform.\n"; out << "\n\nExamples:\n\n" << "$ ./examples/11_planar_complex_array/11_planar_complex_array\n\n"; return out; } /// Compute performance in GFLOP/s double gflops(double runtime_s) const { // Number of real-valued multiply-adds int64_t fmas = problem_size.product() * batch_count * 4; // Two flops per multiply-add return 2.0 * double(fmas) / double(1.0e9) / runtime_s; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// /// Performance test environment for planar complex class TestbedPlanarComplex { public: // Half-precision input and output using Element = cutlass::half_t; // Configurations for layouts and internal computation using LayoutA = cutlass::layout::ColumnMajor; using LayoutB = cutlass::layout::ColumnMajor; using LayoutC = cutlass::layout::ColumnMajor; using ElementCompute = float; using ElementAccumulator = float; // // Data members // cutlass::library::Handle handle; cutlass::gemm::GemmCoord problem_size; int batch_count; cutlass::DeviceAllocation<Element> tensor_A; cutlass::DeviceAllocation<Element> tensor_B; cutlass::DeviceAllocation<Element> tensor_C; cutlass::DeviceAllocation<Element> tensor_D; cutlass::DeviceAllocation<Element> tensor_D_ref; cutlass::DeviceAllocation<void *> ptr_A_real; cutlass::DeviceAllocation<void *> ptr_A_imag; cutlass::DeviceAllocation<void *> ptr_B_real; cutlass::DeviceAllocation<void *> ptr_B_imag; cutlass::DeviceAllocation<void *> ptr_C_real; cutlass::DeviceAllocation<void *> ptr_C_imag; cutlass::DeviceAllocation<void *> ptr_D_real; cutlass::DeviceAllocation<void *> ptr_D_imag; // // Methods // TestbedPlanarComplex( Options const &options ): problem_size(options.problem_size), batch_count(options.batch_count) { // Allocate device memory for batched planar complex GEMM tensor_A.reset(int64_t(problem_size.m()) * problem_size.k() * batch_count * 2); tensor_B.reset(int64_t(problem_size.k()) * problem_size.n() * batch_count * 2); tensor_C.reset(int64_t(problem_size.m()) * problem_size.n() * batch_count * 2); tensor_D.reset(int64_t(problem_size.m()) * problem_size.n() * batch_count * 2); tensor_D_ref.reset(int64_t(problem_size.m()) * problem_size.n() * batch_count * 2); ptr_A_real.reset(batch_count); ptr_A_imag.reset(batch_count); ptr_B_real.reset(batch_count); ptr_B_imag.reset(batch_count); ptr_C_real.reset(batch_count); ptr_C_imag.reset(batch_count); ptr_D_real.reset(batch_count); ptr_D_imag.reset(batch_count); } void initialize() { uint64_t seed = 1073; // Use small integers to simplify correctness checking int scope_max = 6; int scope_min = -6; cutlass::reference::device::BlockFillRandomUniform( tensor_A.get(), tensor_A.size(), seed, Element(scope_max), Element(scope_min), 0); cutlass::reference::device::BlockFillRandomUniform( tensor_B.get(), 
tensor_B.size(), seed * 2019, Element(scope_max), Element(scope_min), 0); cutlass::reference::device::BlockFillRandomUniform( tensor_C.get(), tensor_C.size(), seed * 2020, Element(scope_max), Element(scope_min), 0); } Result profile(Options const &options) { Result result; initialize(); Element *ptr_A = tensor_A.get(); Element *ptr_B = tensor_B.get(); Element *ptr_C = tensor_C.get(); Element *ptr_D = tensor_D.get(); int64_t batch_stride_A = int64_t(problem_size.m()) * problem_size.k() * 2; int64_t batch_stride_B = int64_t(problem_size.k()) * problem_size.n() * 2; int64_t batch_stride_C = int64_t(problem_size.m()) * problem_size.n() * 2; int64_t batch_stride_D = int64_t(problem_size.m()) * problem_size.n() * 2; typename LayoutA::Stride::Index lda = LayoutA::packed({problem_size.m(), problem_size.k()}).stride(0); typename LayoutB::Stride::Index ldb = LayoutB::packed({problem_size.k(), problem_size.n()}).stride(0); typename LayoutC::Stride::Index ldc = LayoutC::packed({problem_size.m(), problem_size.n()}).stride(0); typename LayoutC::Stride::Index ldd = LayoutC::packed({problem_size.m(), problem_size.n()}).stride(0); int64_t imag_stride_A = int64_t(problem_size.m()) * problem_size.k(); int64_t imag_stride_B = int64_t(problem_size.k()) * problem_size.n(); int64_t imag_stride_C = int64_t(problem_size.m()) * problem_size.n(); int64_t imag_stride_D = int64_t(problem_size.m()) * problem_size.n(); // // Configure pointers in global memory // struct { Element *base; void **ptr_real; void **ptr_imag; int64_t batch_stride; int64_t imag_stride; } tensors[] = { { tensor_A.get(), ptr_A_real.get(), ptr_A_imag.get(), batch_stride_A, imag_stride_A}, { tensor_B.get(), ptr_B_real.get(), ptr_B_imag.get(), batch_stride_B, imag_stride_B}, { tensor_C.get(), ptr_C_real.get(), ptr_C_imag.get(), batch_stride_C, imag_stride_C}, { tensor_D.get(), ptr_D_real.get(), ptr_D_imag.get(), batch_stride_D, imag_stride_D} }; for (auto const &tensor : tensors) { for (int idx = 0; idx < batch_count; ++idx) { void *ptr_real = tensor.base + idx * tensor.batch_stride; void *ptr_imag = tensor.base + idx * tensor.batch_stride + tensor.imag_stride; cudaError_t error = cudaMemcpy( tensor.ptr_real + idx, &ptr_real, sizeof(void *), cudaMemcpyHostToDevice); if (error != cudaSuccess) { throw std::runtime_error("Failed to copy pointer to device memory"); } error = cudaMemcpy( tensor.ptr_imag + idx, &ptr_imag, sizeof(void *), cudaMemcpyHostToDevice); if (error != cudaSuccess) { throw std::runtime_error("Failed to copy pointer to device memory"); } } } // // Construct events // cudaEvent_t events[2]; for (auto & event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return -1; } } // Record an event at the start of a series of GEMM operations result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // // Run profiling loop // for (int iter = 0; iter < options.iterations; ++iter) { // // Execute the planar complex array GEMM kernel via the CUTLASS Library's // dispatch routines. // // Note, for planar complex array GEMM kernels, all numeric type arguments // specify the data type of the base real types. These are understood to // apply to planar complex representations of matrices in memory and to complex<T> // structures for scalars. 
// // See tools/library/include/cutlass/library/handle.h for more details. // result.status = handle.gemm_planar_complex_array( problem_size.m(), // expected GEMM M dimension problem_size.n(), // expected GEMM N dimension problem_size.k(), // expected GEMM K dimension batch_count, // Number of batched elements nullptr, nullptr, nullptr, cutlass::library::NumericTypeID::kF32, // Base data type of complex-valued accumulation cutlass::library::NumericTypeID::kF32, // Base data type of complex-valued alpha/beta scalars &options.alpha, // Pointer to alpha scalar, of type complex<T> cutlass::library::NumericTypeID::kF16, // Base data type of complex-valued A matrix cutlass::library::LayoutTypeID::kColumnMajor, // Layout of A matrix cutlass::library::ComplexTransform::kConjugate, // Complex transformation on A matrix operand ptr_A_real.get(), // Pointer to array of pointers to real part of A matrix ptr_A_imag.get(), // Pointer to array of pointers to imaginary part of A matrix lda, // Leading dimension of real part of A matrix lda, // Leading dimension of imaginary part of A matrix cutlass::library::NumericTypeID::kF16, // Base data type of complex-valued B matrix cutlass::library::LayoutTypeID::kColumnMajor, // Layout of B matrix cutlass::library::ComplexTransform::kNone, // Complex transformation on B matrix operand ptr_B_real.get(), // Pointer to array of pointers to real part of B matrix ptr_B_imag.get(), // Pointer to array of pointers to imaginary part of B matrix ldb, // Leading dimension of real part of B matrix ldb, // Leading dimension of imaginary part of B matrix &options.beta, // Pointer to beta scalar, of type complex<T> cutlass::library::NumericTypeID::kF16, // Base data type of complex valued C and D matrices ptr_C_real.get(), // Pointer to array of pointers to real part of C matrix ptr_C_imag.get(), // Pointer to array of pointers to imaginary part of C matrix ldc, // Leading dimension of real part of C matrix ldc, // Leading dimension of imaginary part of C matrix ptr_D_real.get(), // Pointer to array of pointers to real part of D matrix ptr_D_imag.get(), // Pointer to array of pointers to imaginary part of D matrix ldd, // Leading dimension of real part of D matrix ldd // Leading dimension of imaginary part of D matrix ); if (result.status != cutlass::Status::kSuccess) { std::cerr << "CUTLASS internal error - configuration not supported" << std::endl; return result; } } // // Stop profiling loop // // Record an event when the GEMM operations have been launched. result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Wait for work on the device to complete. result.error = cudaEventSynchronize(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Compute average runtime and GFLOPs. 
result.runtime_ms = double(runtime_ms) / double(options.iterations); result.gflops = options.gflops(result.runtime_ms / 1000.0); // Cleanup for (auto event : events) { (void)cudaEventDestroy(event); } if (handle.get_last_operation()) { std::cout << "Recently executed '" << handle.get_last_operation()->description().name << "'" << std::endl; } // // Compute reference in device code // if (options.reference_check) { result.passed = true; for (int64_t idx = 0; result.passed && idx < int64_t(batch_count); ++idx) { cutlass::reference::device::GemmPlanarComplex< Element, LayoutA, Element, LayoutB, Element, LayoutC, ElementAccumulator >( problem_size, options.alpha, {tensor_A.get() + idx * batch_stride_A, lda, imag_stride_A}, cutlass::ComplexTransform::kConjugate, {tensor_B.get() + idx * batch_stride_B, ldb, imag_stride_B}, cutlass::ComplexTransform::kNone, options.beta, {tensor_C.get() + idx * batch_stride_C, ldc, imag_stride_C}, {tensor_D_ref.get() + idx * batch_stride_D, ldd, imag_stride_D} ); Element epsilon = 0.1_hf; Element nonzero_floor = 0.1_hf; result.passed = cutlass::reference::device::BlockCompareRelativelyEqual( tensor_D.get() + idx * batch_stride_D, tensor_D_ref.get() + idx * batch_stride_D, batch_stride_D, epsilon, nonzero_floor ); } if (result.passed) { std::cout << "Reference check passed." << std::endl; } else { std::cerr << "Error - reference check failed." << std::endl; } } std::cout << "Runtime: " << result.runtime_ms << " ms" << std::endl; std::cout << " GFLOPs: " << result.gflops << std::endl; return result; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char const **args) { // // This example uses mma.sync to directly access Tensor Cores to achieve peak performance. // // Volta Tensor Core operations are first available in CUDA 10.1 Toolkit. // // Turing Tensor Core operations are first available in CUDA 10.2 Toolkit. // cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (props.major < 7) { std::cerr << "Tensor Core operations must be run on a machine with compute capability at least 70." << std::endl; // Returning zero so this passes on older architectures. Its actions are no-op. return 0; } else if (props.major == 7 && props.minor <= 2) { // // If running on the Volta architecture, at least CUDA 10.1 Toolkit is required to run this example. // if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 1))) { std::cerr << "Volta Tensor Core operations must be compiled with CUDA 10.1 Toolkit or later." << std::endl; // Returning zero so this passes on older Toolkits. Its actions are no-op. return 0; } } else if (props.major == 7 && props.minor >= 5) { // // If running on the Turing architecture, at least CUDA 10.2 Toolkit is required to run this example. // if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) { std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl; // Returning zero so this passes on older Toolkits. Its actions are no-op. return 0; } } else { // NVIDIA Ampere Architecture GPUs (SM80 and later) are fully supported on CUDA 11 Toolkit and beyond. 
// // fall through } // // Parse options // Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } // Execute one problem size if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } TestbedPlanarComplex testbed(options); Result result = testbed.profile(options); return result.passed ? 0 : -1; } /////////////////////////////////////////////////////////////////////////////////////////////////
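//
// A minimal sketch (not part of the example above; `PlanarComplexView` is a
// hypothetical helper introduced only for illustration) of the planar complex
// addressing convention the testbed relies on: each batch stores its real plane
// followed by its imaginary plane, so an imaginary element lives `imag_stride`
// elements past the corresponding real element. This mirrors the pointer
// arithmetic used when profile() populates the ptr_*_real / ptr_*_imag arrays.
//

#include <cstdint>

template <typename Element>
struct PlanarComplexView {

  Element *base;          // start of the batched allocation
  int64_t batch_stride;   // elements per batch (real plane + imaginary plane)
  int64_t imag_stride;    // offset from a batch's real plane to its imaginary plane

  // Pointer to the real plane of batch `idx`
  Element *real(int64_t idx) const { return base + idx * batch_stride; }

  // Pointer to the imaginary plane of batch `idx`
  Element *imag(int64_t idx) const { return base + idx * batch_stride + imag_stride; }
};

// Example: PlanarComplexView<Element>{tensor_A.get(), batch_stride_A, imag_stride_A}.imag(2)
// reproduces the pointer written into ptr_A_imag for batch index 2.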
cutlass/examples/11_planar_complex_array/planar_complex_array.cu/0
{ "file_path": "cutlass/examples/11_planar_complex_array/planar_complex_array.cu", "repo_id": "cutlass", "token_count": 9173 }
4
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level implicit GEMM convolution definitions combine threadblock-scoped matrix multiply-add with the appropriate threadblock-scoped epilogue. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/conv/kernel/default_conv2d.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_analytic.h" #include "cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_optimized.h" #include "cutlass/conv/threadblock/conv2d_fprop_filter_tile_access_iterator_optimized.h" #include "cutlass/transform/threadblock/predicated_vector_access_iterator.h" #include "cutlass/transform/threadblock/vector_iterator.h" #include "cutlass/transform/warp/vector_fragment_iterator.h" #include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h" #include "kernel/b2b_implicit_gemm_convolution.h" #include "threadblock/b2b_implicit_gemm_pipelined.h" #include "threadblock/b2b_implicit_gemm_multistage.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines a kernel for Conv2dFprop template < typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementAccumulator, typename OperatorClass, typename ArchTag, typename ThreadblockShape0, typename ThreadblockShape1, typename WarpShape0, typename WarpShape1, typename InstructionShape, typename EpilogueOutputOp0, typename EpilogueOutputOp1, typename ThreadblockSwizzle, int Stages, typename MathOperatorTag, conv::IteratorAlgorithm IteratorAlgorithm = IteratorAlgorithm::kAnalytic, bool SmemAccumulator = false > struct DefaultB2bConv2dFprop; } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
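//
// Illustration only: none of the concrete types below come from this header; they are
// assumptions sketching how the template parameters pair up (two threadblock shapes,
// two warp shapes, and two epilogues -- one per fused convolution). The partial
// specializations that actually define the kernel live in separate headers.
//
//   using B2bFprop = cutlass::conv::kernel::DefaultB2bConv2dFprop<
//       cutlass::half_t, cutlass::layout::TensorNHWC,     // ElementA, LayoutA (activations)
//       cutlass::half_t, cutlass::layout::TensorNHWC,     // ElementB, LayoutB (filters)
//       cutlass::half_t, cutlass::layout::TensorNHWC,     // ElementC, LayoutC (output)
//       float,                                            // ElementAccumulator
//       cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80,
//       cutlass::gemm::GemmShape<64,  64, 32>,            // ThreadblockShape0 (1st conv)
//       cutlass::gemm::GemmShape<64, 128, 32>,            // ThreadblockShape1 (2nd conv)
//       cutlass::gemm::GemmShape<32,  64, 32>,            // WarpShape0
//       cutlass::gemm::GemmShape<32, 128, 32>,            // WarpShape1
//       cutlass::gemm::GemmShape<16, 8, 16>,              // InstructionShape
//       EpilogueOutputOp0, EpilogueOutputOp1,             // per-conv epilogues (assumed defined)
//       cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
//       3,                                                // Stages
//       cutlass::arch::OpMultiplyAdd>;                    // MathOperatorTag
//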
cutlass/examples/13_two_tensor_op_fusion/kernel/default_b2b_conv2d_fprop.h/0
{ "file_path": "cutlass/examples/13_two_tensor_op_fusion/kernel/default_b2b_conv2d_fprop.h", "repo_id": "cutlass", "token_count": 1110 }
5
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a double-buffered threadblock-scoped GEMM kernel. */ #pragma once #include "cutlass/aligned_buffer.h" #include "cutlass/arch/memory.h" #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h" #include "threadblock/b2b_mma_base.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape0_, /// Iterates over tiles of A operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorA0_, /// Iterates over tiles of A operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorA0_, /// Cache operation for operand A cutlass::arch::CacheOperation::Kind CacheOpA0, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB0_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB0_, /// Cache operation for operand B cutlass::arch::CacheOperation::Kind CacheOpB0, /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape1_, /// Iterates over the intermediate accumulator tile // (concept::MmaTensorOpFragmentIterator) typename FragmentIteratorA1_, /// Iterates over vectors of scale and bias vector in global memory // (concept: VectorIterator) typename IteratorAccumulatorScaleBias_, /// WarpIterator to load Scale or Bias vector from threadblock fragment typename FragmentIteratorA1ScaleBias_, /// Iterates over tiles of B operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB1_, /// Iterates over tiles of B operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB1_, /// Cache operation for operand B cutlass::arch::CacheOperation::Kind CacheOpB1, /// Data type of accumulator matrix typename ElementC_, /// Data type of accumulator matrix typename LayoutC_, /// Output operator for 1st Gemm(concept: epilogue::thread::LinearCombinationClamp, etc...) 
typename OutputOp_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy0_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy1_, /// Number of stages, int Stages, /// Used for partial specialization typename Enable = bool> class B2bMmaMultistage : public B2bMmaBase<Shape0_, Shape1_, Policy0_, Policy1_, Stages> { public: ///< Base class using Base = B2bMmaBase<Shape0_, Shape1_, Policy0_, Policy1_, Stages>; ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape0 = Shape0_; ///< Iterates over tiles of A operand in global memory using IteratorA0 = IteratorA0_; using IteratorA = IteratorA0; ///< Iterates over tiles of B operand in global memory using IteratorB0 = IteratorB0_; using IteratorB = IteratorB0; ///< Policy describing tuning details using Policy0 = Policy0_; using SmemIteratorA0 = SmemIteratorA0_; using SmemIteratorB0 = SmemIteratorB0_; ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape1 = Shape1_; ///< Iterates over intermediate accumulator tile using FragmentIteratorA1 = FragmentIteratorA1_; ///< Iterates over tiles of the scale and bias vectors in global memory using IteratorAccumulatorScaleBias = IteratorAccumulatorScaleBias_; ///< WarpIterator to load Scale or Bias vector from threadblock fragment using FragmentIteratorA1ScaleBias = FragmentIteratorA1ScaleBias_; ///< Iterates over tiles of B operand in global memory using IteratorB1 = IteratorB1_; ///< Policy describing tuning details using Policy1 = Policy1_; ///< Export Policy0 as the threadblock-level Mma's policy using Policy = Policy0; using Shape = Shape0; using SmemIteratorB1 = SmemIteratorB1_; ///< Data type of accumulator matrix using ElementC = ElementC_; ///< Layout of accumulator matrix using LayoutC = LayoutC_; ///< Epilogue after 1st Gemm using OutputOp = OutputOp_; static const bool PerChannelScale = (OutputOp::kScale == epilogue::thread::ScaleType::OnlyAlphaPerChannelScaling); static cutlass::arch::CacheOperation::Kind const kCacheOpA0 = CacheOpA0; static cutlass::arch::CacheOperation::Kind const kCacheOpB0 = CacheOpB0; static cutlass::arch::CacheOperation::Kind const kCacheOpB1 = CacheOpB1; // // Dependent types // /// Fragment of accumulator tile using FragmentC0 = typename Policy0::Operator::FragmentC; /// Warp-level Mma using Operator0 = typename Policy0::Operator; /// Fragment of Scale and Bias loaded from global memory using FragmentA1ScaleBias = typename IteratorAccumulatorScaleBias::Fragment; /// Fragment of accumulator tile using FragmentC1 = typename Policy1::Operator::FragmentC; /// Warp-level Mma using Operator1 = typename Policy1::Operator; /// Minimum architecture is Sm80 to support cp.async using ArchTag = arch::Sm80; /// Complex transform on A operand static ComplexTransform const kTransformA0 = Operator0::kTransformA; /// Complex transform on B operand static ComplexTransform const kTransformB0 = Operator0::kTransformB; /// Complex transform on B operand static ComplexTransform const kTransformB1 = Operator1::kTransformB; /// Complex transform exports needed by higher-level kernels static ComplexTransform const kTransformA = kTransformA0; static ComplexTransform const kTransformB = kTransformB0; /// Internal structure exposed for introspection. 
struct Detail { static_assert(Base::kWarpGemmIterations0 > 1, "The pipelined structure requires at least two warp-level " "GEMM operations."); static_assert(Base::kWarpGemmIterations1 > 1, "The pipelined structure requires at least two warp-level " "GEMM operations."); /// Number of cp.async instructions to load one stage of operand A static int const TBLoadIterationsA0 = IteratorA0::ThreadMap::Iterations::kCount; /// Number of cp.async instructions to load one stage of operand B static int const TBLoadIterationsB0 = IteratorB0::ThreadMap::Iterations::kCount; /// Number of cp.async instructions to load one stage of operand B static int const TBLoadIterationsB1 = IteratorB1::ThreadMap::Iterations::kCount; /// Number of stages static int const kStages = Stages; /// Number of cp.async instructions to load on group of operand A static int const kAccessesPerGroupA0 = (TBLoadIterationsA0 + Base::kWarpGemmIterations0 - 1) / Base::kWarpGemmIterations0; /// Number of cp.async instructions to load on group of operand B static int const kAccessesPerGroupB0 = (TBLoadIterationsB0 + Base::kWarpGemmIterations0 - 1) / Base::kWarpGemmIterations0; /// Number of cp.async instructions to load on group of operand B static int const kAccessesPerGroupB1 = (TBLoadIterationsB1 + Base::kWarpGemmIterations1 - 1) / Base::kWarpGemmIterations1; }; private: using WarpLoadedFragmentA0 = typename Operator0::FragmentA; using WarpLoadedFragmentB0 = typename Operator0::FragmentB; /// Warp Fragment of operand A1 loaded from accmulator tile using WarpLoadedFragmentA1 = typename FragmentIteratorA1::Fragment; using WarpLoadedFragmentA1ScaleBias = typename FragmentIteratorA1ScaleBias::Fragment; using WarpLoadedFragmentB1 = typename Operator1::FragmentB; using WarpTransformedFragmentA0 = typename Operator0::TransformedFragmentA; using WarpTransformedFragmentB0 = typename Operator0::TransformedFragmentB; using WarpTransformedFragmentA1 = typename Operator1::TransformedFragmentA; using WarpTransformedFragmentB1 = typename Operator1::TransformedFragmentB; private: // // Data members // /// Iterator to write threadblock-scoped tile of A operand to shared memory SmemIteratorA0 smem_iterator_A0_; /// Iterator to write threadblock-scoped tile of B operand to shared memory SmemIteratorB0 smem_iterator_B0_; /// Iterator to write threadblock-scoped tile of B operand to shared memory SmemIteratorB1 smem_iterator_B1_; public: /// Construct from tensor references CUTLASS_DEVICE B2bMmaMultistage( ///< Shared storage needed for internal use by threadblock-scoped GEMM typename Base::B2bMmaSharedStorage &shared_storage, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx, ///< GEMM0 N is used for accumulator extent int problem_size_0_n ): Base(shared_storage, thread_idx, warp_idx, lane_idx), smem_iterator_A0_(shared_storage.shared_storage0.operand_A_ref(), thread_idx), smem_iterator_B0_(shared_storage.shared_storage0.operand_B_ref(), thread_idx), smem_iterator_B1_(shared_storage.shared_storage1.operand_B_ref(), thread_idx) { // Compute warp location within threadblock tile by mapping the warp_id to // three coordinates: // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_idx_mn = warp_idx % (Base::WarpCount0::kM * Base::WarpCount0::kN); int warp_idx_k = warp_idx / (Base::WarpCount0::kM * 
Base::WarpCount0::kN); int warp_idx_m = warp_idx_mn % Base::WarpCount0::kM; int warp_idx_n = warp_idx_mn / Base::WarpCount0::kM; // Add per-warp offsets in units of warp-level tiles this->warp_tile_iterator_A0_.add_tile_offset( {warp_idx_m, Base::kWarpGemmIterations0 * warp_idx_k}); this->warp_tile_iterator_B0_.add_tile_offset( {Base::kWarpGemmIterations0 * warp_idx_k, warp_idx_n}); this->warp_tile_iterator_B1_.add_tile_offset( {Base::kWarpGemmIterations1 * warp_idx_k, warp_idx_n}); } CUTLASS_DEVICE void copy_tiles_and_advance_0(IteratorA0 &iterator_A0, IteratorB0 &iterator_B0, int group_start_A0 = 0, int group_start_B0 = 0) { iterator_A0.set_iteration_index(group_start_A0 * IteratorA0::kAccessesPerVector); this->smem_iterator_A0_.set_iteration_index(group_start_A0); // Load for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupA0; ++j) { if (group_start_A0 + j < Detail::TBLoadIterationsA0) { typename IteratorA0::AccessType *dst_ptr = reinterpret_cast<typename IteratorA0::AccessType *>( this->smem_iterator_A0_.get()); int const kSrcBytes = sizeof_bits<typename IteratorA0::Element>::value * IteratorA0::ThreadMap::kElementsPerAccess / IteratorA0::kAccessesPerVector / 8; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorA0::kAccessesPerVector; ++v) { auto gmem_ptr = iterator_A0.get(); cutlass::arch::cp_async<kSrcBytes, kCacheOpA0>( dst_ptr + v, gmem_ptr, iterator_A0.valid()); ++iterator_A0; } ++this->smem_iterator_A0_; } } iterator_B0.set_iteration_index(group_start_B0 * IteratorB0::kAccessesPerVector); this->smem_iterator_B0_.set_iteration_index(group_start_B0); // Load for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupB0; ++j) { if (group_start_B0 + j < Detail::TBLoadIterationsB0) { typename IteratorB0::AccessType *dst_ptr = reinterpret_cast<typename IteratorB0::AccessType *>( this->smem_iterator_B0_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB0::Element>::value * IteratorB0::ThreadMap::kElementsPerAccess / IteratorB0::kAccessesPerVector / 8; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB0::kAccessesPerVector; ++v) { auto gmem_ptr = iterator_B0.get(); cutlass::arch::cp_async<kSrcBytes, kCacheOpB0>( dst_ptr + v, gmem_ptr, iterator_B0.valid()); ++iterator_B0; } ++this->smem_iterator_B0_; } } } CUTLASS_DEVICE void copy_tiles_and_advance_1(IteratorB1 &iterator_B1, int group_start_B1 = 0) { iterator_B1.set_iteration_index(group_start_B1 * IteratorB1::kAccessesPerVector); this->smem_iterator_B1_.set_iteration_index(group_start_B1); // Load for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupB1; ++j) { if (group_start_B1 + j < Detail::TBLoadIterationsB1) { typename IteratorB1::AccessType *dst_ptr = reinterpret_cast<typename IteratorB1::AccessType *>( this->smem_iterator_B1_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB1::Element>::value * IteratorB1::ThreadMap::kElementsPerAccess / IteratorB1::kAccessesPerVector / 8; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) { auto gmem_ptr = iterator_B1.get(); cutlass::arch::cp_async<kSrcBytes, kCacheOpB1>( dst_ptr + v, gmem_ptr, iterator_B1.valid()); ++iterator_B1; } ++this->smem_iterator_B1_; } } } /// Perform a threadblock-scoped matrix multiply-accumulate CUTLASS_DEVICE void operator()( ///< problem size of GEMM int gemm_k_iterations_0, ///< destination accumulator tile FragmentC1 &accum, ///< iterator over A0 operand in global memory IteratorA0 iterator_A0, ///< iterator over B0 operand in global 
memory IteratorB0 iterator_B0, ///< iterator over A1 operand scale vector in global memory IteratorAccumulatorScaleBias iterator_A1_scale, ///< iterator over A1 operand bias vector in global memory IteratorAccumulatorScaleBias iterator_A1_bias, ///< iterator over B1 operand in global memory IteratorB1 iterator_B1, ///< initial value of accumulator FragmentC0 const &src_accum, ///< epilogue operation after 1st Gemm OutputOp output_op_0) { // // Prologue // // Issue several complete stages CUTLASS_PRAGMA_UNROLL for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations_0) { iterator_A0.clear_mask(gemm_k_iterations_0 == 0); iterator_B0.clear_mask(gemm_k_iterations_0 == 0); iterator_A0.set_iteration_index(0); this->smem_iterator_A0_.set_iteration_index(0); // Load for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::TBLoadIterationsA0; ++j) { typename IteratorA0::AccessType *dst_ptr = reinterpret_cast<typename IteratorA0::AccessType *>( this->smem_iterator_A0_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorA0::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorA0::Element>::value * IteratorA0::ThreadMap::kElementsPerAccess / IteratorA0::kAccessesPerVector / 8; int src_bytes = (iterator_A0.valid() ? kSrcBytes : 0); cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA0>( dst_ptr + v, iterator_A0.get(), iterator_A0.valid()); ++iterator_A0; } ++this->smem_iterator_A0_; } iterator_B0.set_iteration_index(0); this->smem_iterator_B0_.set_iteration_index(0); // Load for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::TBLoadIterationsB0; ++j) { typename IteratorB0::AccessType *dst_ptr = reinterpret_cast<typename IteratorB0::AccessType *>( this->smem_iterator_B0_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB0::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorB0::Element>::value * IteratorB0::ThreadMap::kElementsPerAccess / IteratorB0::kAccessesPerVector / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB0>( dst_ptr + v, iterator_B0.get(), iterator_B0.valid()); ++iterator_B0; } ++this->smem_iterator_B0_; } // Move to the next stage iterator_A0.add_tile_offset({0, 1}); iterator_B0.add_tile_offset({1, 0}); this->smem_iterator_A0_.add_tile_offset({0, 1}); this->smem_iterator_B0_.add_tile_offset({1, 0}); // Defines the boundary of a stage of cp.async. 
cutlass::arch::cp_async_fence(); } // Perform accumulation in the 'd' output operand FragmentC0 accum0 = src_accum; // DEPBAR+SYNC cutlass::arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Pair of fragments used to overlap shared memory loads and math // instructions WarpLoadedFragmentA0 warp_loaded_frag_A0[2]; WarpLoadedFragmentB0 warp_loaded_frag_B0[2]; WarpTransformedFragmentA0 warp_transformed_frag_A0[2]; WarpTransformedFragmentB0 warp_transformed_frag_B0[2]; Operator0 warp_mma0; this->warp_tile_iterator_A0_.set_kgroup_index(0); this->warp_tile_iterator_B0_.set_kgroup_index(0); this->warp_tile_iterator_A0_.load(warp_loaded_frag_A0[0]); this->warp_tile_iterator_B0_.load(warp_loaded_frag_B0[0]); ++this->warp_tile_iterator_A0_; ++this->warp_tile_iterator_B0_; iterator_A0.clear_mask(gemm_k_iterations_0 == 0); iterator_B0.clear_mask(gemm_k_iterations_0 == 0); int smem_write_stage_idx = Base::kStages - 1; int smem_read_stage_idx = 0; warp_mma0.transform(warp_transformed_frag_A0[0], warp_transformed_frag_B0[0], warp_loaded_frag_A0[0], warp_loaded_frag_B0[0]); // // Mainloop // CUTLASS_GEMM_LOOP for (; gemm_k_iterations_0 > (-Base::kStages + 1);) { // // Loop over GEMM K dimension // // Computes a warp-level GEMM on data held in shared memory // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations0; ++warp_mma_k) { // Load warp-level tiles from shared memory, wrapping to k offset if // this is the last group as the case may be. this->warp_tile_iterator_A0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0); this->warp_tile_iterator_B0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations0); this->warp_tile_iterator_A0_.load(warp_loaded_frag_A0[(warp_mma_k + 1) % 2]); this->warp_tile_iterator_B0_.load(warp_loaded_frag_B0[(warp_mma_k + 1) % 2]); ++this->warp_tile_iterator_A0_; ++this->warp_tile_iterator_B0_; if (warp_mma_k > 0) warp_mma0.transform(warp_transformed_frag_A0[warp_mma_k % 2], warp_transformed_frag_B0[warp_mma_k % 2], warp_loaded_frag_A0[warp_mma_k % 2], warp_loaded_frag_B0[warp_mma_k % 2]); warp_mma0( accum0, warp_transformed_frag_A0[warp_mma_k % 2], warp_transformed_frag_B0[warp_mma_k % 2], accum0 ); // Issue global->shared copies for the this stage if (warp_mma_k < Base::kWarpGemmIterations0 - 1) { int group_start_iteration_A0, group_start_iteration_B0; group_start_iteration_A0 = warp_mma_k * Detail::kAccessesPerGroupA0; group_start_iteration_B0 = warp_mma_k * Detail::kAccessesPerGroupB0; copy_tiles_and_advance_0(iterator_A0, iterator_B0, group_start_iteration_A0, group_start_iteration_B0); } if (warp_mma_k + 2 == Base::kWarpGemmIterations0) { int group_start_iteration_A0, group_start_iteration_B0; group_start_iteration_A0 = (warp_mma_k + 1) * Detail::kAccessesPerGroupA0; group_start_iteration_B0 = (warp_mma_k + 1) * Detail::kAccessesPerGroupB0; copy_tiles_and_advance_0(iterator_A0, iterator_B0, group_start_iteration_A0, group_start_iteration_B0); // Inserts a memory fence between stages of cp.async instructions. cutlass::arch::cp_async_fence(); // Waits until kStages-2 stages have committed. 
arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Move to the next stage iterator_A0.add_tile_offset({0, 1}); iterator_B0.add_tile_offset({1, 0}); this->smem_iterator_A0_.add_tile_offset({0, 1}); this->smem_iterator_B0_.add_tile_offset({1, 0}); // Add negative offsets to return iterators to the 'start' of the // circular buffer in shared memory if (smem_write_stage_idx == (Base::kStages - 1)) { this->smem_iterator_A0_.add_tile_offset({0, -Base::kStages}); this->smem_iterator_B0_.add_tile_offset({-Base::kStages, 0}); smem_write_stage_idx = 0; } else { ++smem_write_stage_idx; } if (smem_read_stage_idx == (Base::kStages - 1)) { this->warp_tile_iterator_A0_.add_tile_offset( {0, -Base::kStages * Policy0::kPartitionsK * Base::kWarpGemmIterations0}); this->warp_tile_iterator_B0_.add_tile_offset( {-Base::kStages * Policy0::kPartitionsK * Base::kWarpGemmIterations0, 0}); smem_read_stage_idx = 0; } else { ++smem_read_stage_idx; } --gemm_k_iterations_0; iterator_A0.clear_mask(gemm_k_iterations_0 == 0); iterator_B0.clear_mask(gemm_k_iterations_0 == 0); } // Do any conversions feeding the first stage at the end of the loop so // we can start right away on mma instructions if (warp_mma_k + 1 == Base::kWarpGemmIterations0) warp_mma0.transform(warp_transformed_frag_A0[(warp_mma_k + 1) % 2], warp_transformed_frag_B0[(warp_mma_k + 1) % 2], warp_loaded_frag_A0[(warp_mma_k + 1) % 2], warp_loaded_frag_B0[(warp_mma_k + 1) % 2]); } } // Commit and drain all pending and predicated cp.async pnz from the GEMM mainloop cutlass::arch::cp_async_fence(); cutlass::arch::cp_async_wait<0>(); __syncthreads(); // 2nd Gemm /// Iterator to load a warp-scoped tile of A1 operand from intermediate accumulator tile FragmentIteratorA1 warp_tile_iterator_A1_(accum0); FragmentA1ScaleBias tb_frag_A1_scale; FragmentA1ScaleBias tb_frag_A1_bias; FragmentIteratorA1ScaleBias warp_tile_iterator_A1_scale_(tb_frag_A1_scale); FragmentIteratorA1ScaleBias warp_tile_iterator_A1_bias_(tb_frag_A1_bias); if(PerChannelScale) { tb_frag_A1_scale.clear(); iterator_A1_scale.load(tb_frag_A1_scale); ++iterator_A1_scale; } tb_frag_A1_bias.clear(); iterator_A1_bias.load(tb_frag_A1_bias); ++iterator_A1_bias; // // Prologue // int gemm_k_iterations_1 = (FragmentIteratorA1::Policy::kIterations + Base::kWarpGemmIterations1 - 1) / Base::kWarpGemmIterations1; // Issue several complete stages CUTLASS_PRAGMA_UNROLL for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations_1) { iterator_B1.clear_mask(gemm_k_iterations_1 == 0); iterator_B1.set_iteration_index(0); this->smem_iterator_B1_.set_iteration_index(0); // Load for operand B CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::TBLoadIterationsB1; ++j) { typename IteratorB1::AccessType *dst_ptr = reinterpret_cast<typename IteratorB1::AccessType *>( this->smem_iterator_B1_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorB1::Element>::value * IteratorB1::ThreadMap::kElementsPerAccess / IteratorB1::kAccessesPerVector / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB1>( dst_ptr + v, iterator_B1.get(), iterator_B1.valid()); ++iterator_B1; } ++this->smem_iterator_B1_; } // Move to the next stage iterator_B1.add_tile_offset({1, 0}); this->smem_iterator_B1_.add_tile_offset({1, 0}); // Defines the boundary of a stage of cp.async. 
cutlass::arch::cp_async_fence(); } // DEPBAR+SYNC cutlass::arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Pair of fragments used to overlap shared memory loads and math // instructions WarpLoadedFragmentA1 warp_loaded_frag_A1[2]; WarpLoadedFragmentA1ScaleBias warp_loaded_frag_A1_scale[2]; WarpLoadedFragmentA1ScaleBias warp_loaded_frag_A1_bias[2]; WarpLoadedFragmentB1 warp_loaded_frag_B1[2]; WarpTransformedFragmentA1 warp_transformed_frag_A1[2]; WarpTransformedFragmentB1 warp_transformed_frag_B1[2]; Operator1 warp_mma1; if(PerChannelScale) { warp_tile_iterator_A1_scale_.load(warp_loaded_frag_A1_scale[0]); ++warp_tile_iterator_A1_scale_; } warp_tile_iterator_A1_bias_.load(warp_loaded_frag_A1_bias[0]); ++warp_tile_iterator_A1_bias_; warp_tile_iterator_A1_.load(warp_loaded_frag_A1[0], warp_loaded_frag_A1_scale[0], warp_loaded_frag_A1_bias[0], output_op_0); ++warp_tile_iterator_A1_; this->warp_tile_iterator_B1_.set_kgroup_index(0); this->warp_tile_iterator_B1_.load(warp_loaded_frag_B1[0]); ++this->warp_tile_iterator_B1_; iterator_B1.clear_mask(gemm_k_iterations_1 == 0); smem_write_stage_idx = Base::kStages - 1; smem_read_stage_idx = 0; warp_mma1.transform(warp_transformed_frag_A1[0], warp_transformed_frag_B1[0], warp_loaded_frag_A1[0], warp_loaded_frag_B1[0]); // // Mainloop // gemm_k_iterations_1 = (FragmentIteratorA1::Policy::kIterations + Base::kWarpGemmIterations1 - 1) / Base::kWarpGemmIterations1 - (Base::kStages - 1); CUTLASS_PRAGMA_UNROLL for (; gemm_k_iterations_1 > (-Base::kStages + 1); gemm_k_iterations_1--) { // // Loop over GEMM K dimension // // Computes a warp-level GEMM on data held in shared memory // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations1; ++warp_mma_k) { // Load threadblock-level scale/bias vector from global memory if (warp_mma_k + 1 == Base::kWarpGemmIterations1) { if(PerChannelScale) { tb_frag_A1_scale.clear(); iterator_A1_scale.load(tb_frag_A1_scale); ++iterator_A1_scale; } tb_frag_A1_bias.clear(); iterator_A1_bias.load(tb_frag_A1_bias); ++iterator_A1_bias; } // Load warp-level scale bias fragment from threadblock scale/bias vector if(PerChannelScale) { warp_tile_iterator_A1_scale_.load(warp_loaded_frag_A1_scale[(warp_mma_k + 1) % 2]); ++warp_tile_iterator_A1_scale_; } warp_tile_iterator_A1_bias_.load(warp_loaded_frag_A1_bias[(warp_mma_k + 1) % 2]); ++warp_tile_iterator_A1_bias_; // Load warp-level tile from accumulator fragment warp_tile_iterator_A1_.load(warp_loaded_frag_A1[(warp_mma_k + 1) % 2], warp_loaded_frag_A1_scale[(warp_mma_k + 1) % 2], warp_loaded_frag_A1_bias[(warp_mma_k + 1) % 2], output_op_0); ++warp_tile_iterator_A1_; // Load warp-level tiles from shared memory, wrapping to k offset if // this is the last group as the case may be. 
this->warp_tile_iterator_B1_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations1); this->warp_tile_iterator_B1_.load(warp_loaded_frag_B1[(warp_mma_k + 1) % 2]); ++this->warp_tile_iterator_B1_; if (warp_mma_k > 0) warp_mma1.transform(warp_transformed_frag_A1[warp_mma_k % 2], warp_transformed_frag_B1[warp_mma_k % 2], warp_loaded_frag_A1[warp_mma_k % 2], warp_loaded_frag_B1[warp_mma_k % 2]); warp_mma1( accum, warp_transformed_frag_A1[warp_mma_k % 2], warp_transformed_frag_B1[warp_mma_k % 2], accum ); // Issue global->shared copies for the this stage if (warp_mma_k < Base::kWarpGemmIterations1 - 1) { int group_start_iteration_B1; group_start_iteration_B1 = warp_mma_k * Detail::kAccessesPerGroupB1; copy_tiles_and_advance_1(iterator_B1, group_start_iteration_B1); } if (warp_mma_k + 2 == Base::kWarpGemmIterations1) { int group_start_iteration_B1; group_start_iteration_B1 = (warp_mma_k + 1) * Detail::kAccessesPerGroupB1; copy_tiles_and_advance_1(iterator_B1, group_start_iteration_B1); // Inserts a memory fence between stages of cp.async instructions. cutlass::arch::cp_async_fence(); // Waits until kStages-2 stages have committed. arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Move to the next stage iterator_B1.add_tile_offset({1, 0}); this->smem_iterator_B1_.add_tile_offset({1, 0}); // Add negative offsets to return iterators to the 'start' of the // circular buffer in shared memory if (smem_write_stage_idx == (Base::kStages - 1)) { this->smem_iterator_B1_.add_tile_offset({-Base::kStages, 0}); smem_write_stage_idx = 0; } else { ++smem_write_stage_idx; } if (smem_read_stage_idx == (Base::kStages - 1)) { this->warp_tile_iterator_B1_.add_tile_offset( {-Base::kStages * Policy1::kPartitionsK * Base::kWarpGemmIterations1, 0}); smem_read_stage_idx = 0; } else { ++smem_read_stage_idx; } iterator_B1.clear_mask(gemm_k_iterations_1 == 1); } // Do any conversions feeding the first stage at the end of the loop so // we can start right away on mma instructions if (warp_mma_k + 1 == Base::kWarpGemmIterations1) warp_mma1.transform(warp_transformed_frag_A1[(warp_mma_k + 1) % 2], warp_transformed_frag_B1[(warp_mma_k + 1) % 2], warp_loaded_frag_A1[(warp_mma_k + 1) % 2], warp_loaded_frag_B1[(warp_mma_k + 1) % 2]); } } // Commit and drain all pending and predicated cp.async pnz from the GEMM mainloop cutlass::arch::cp_async_fence(); cutlass::arch::cp_async_wait<0>(); __syncthreads(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
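//
// A standalone sketch of the cp.async commit/wait discipline that B2bMmaMultistage
// applies to both of its GEMM stages. This is not derived from the class above:
// the kernel name and tile size are illustrative choices, blockDim.x == kTileElems
// and kStages >= 2 are assumed, and the per-thread "math" is a trivial sum rather
// than a warp-level MMA. It only demonstrates the prologue / mainloop / drain pattern.
//

#include "cutlass/arch/memory_sm80.h"

template <int kStages, int kTileElems>
__global__ void pipelined_copy_sum(float const *gmem, float *out, int num_tiles) {

  __shared__ float smem[kStages][kTileElems];

  int t = threadIdx.x;
  float acc = 0.f;

  // Prologue: issue kStages - 1 stages of global->shared copies, ending each stage
  // with a fence so the hardware tracks it as a separate commit group.
  for (int s = 0; s < kStages - 1; ++s) {
    cutlass::arch::cp_async<sizeof(float)>(&smem[s][t], gmem + s * kTileElems + t, s < num_tiles);
    cutlass::arch::cp_async_fence();
  }

  int write_stage = kStages - 1;
  int read_stage  = 0;

  // Mainloop: overlap "math" on the oldest resident stage with copies for a future stage.
  for (int tile = 0; tile < num_tiles; ++tile) {

    // Wait until at most kStages - 2 commit groups are still in flight, i.e. the
    // oldest stage has landed in shared memory, then synchronize the threadblock.
    cutlass::arch::cp_async_wait<kStages - 2>();
    __syncthreads();

    acc += smem[read_stage][t];   // stand-in for the warp-level MMAs

    // Refill the slot that is now free and commit it as a new group.
    int next = tile + kStages - 1;
    cutlass::arch::cp_async<sizeof(float)>(
        &smem[write_stage][t], gmem + next * kTileElems + t, next < num_tiles);
    cutlass::arch::cp_async_fence();

    write_stage = (write_stage + 1) % kStages;
    read_stage  = (read_stage + 1) % kStages;
    __syncthreads();
  }

  // Drain all outstanding copies before the result is consumed, mirroring the
  // cp_async_fence(); cp_async_wait<0>(); __syncthreads(); sequence used above.
  cutlass::arch::cp_async_fence();
  cutlass::arch::cp_async_wait<0>();
  __syncthreads();

  out[blockIdx.x * blockDim.x + t] = acc;
}

// Example launch (SM80 or newer): pipelined_copy_sum<3, 128><<<1, 128>>>(src, dst, n_tiles);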
cutlass/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_multistage.h/0
{ "file_path": "cutlass/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_multistage.h", "repo_id": "cutlass", "token_count": 15041 }
6
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** NVIDIA Ampere architecture starts supporting tfloat32 (see include/cutlass/tfloat32.h) data types in tensor cores. One big advantage is that we can load in F32 data and convert them implicitly to tf32 inside the SYMM kernel which means no change is needed to accelerate traditional F32 data by using NVIDIA Ampere architecture. We can use the tf32 mode of tensor core to emulate a fast accurate SYMM kernel which is accelerated using Ampere Tensor Cores (see include/cutlass/gemm/warp/mma_tensor_op_fast_f32.h). The trick is very simple a x b = (a_big + a_small) x (b_big + b_small) = a_big x b_big + a_big x b_small + a_small x b_big big = convert_to_tf32(F32) small = convert_to_tf32(F32 - big) a_small x b_small is discarded because they are too small. This example demonstrates usage of this kernel, along with accuracy measurements w.r.t. actual F32 results (SSYMM from cuBLAS) and against F64 results (DSYMM from CUTLASS) To enable this feature, the only change needs to make is to change the default OpMultiplyAdd to OpMultiplyAddFastF32. Now, we have two different flavors of SSYMM in the profiler for Ampere: s1688symm // Use 3xTF32 to emulate F32. F32 in, converted in TF32-big and TF32-small internally, // accumulated in F32, F32 out. s1688tf32symm // Use 1xTF32. F32 in, converted to one TF32 internally, accumulated in F32, F32 out. 
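    As a concrete host-side illustration of the big/small decomposition (a sketch for
    intuition only -- it is not code used by this example; it simply relies on the
    float <-> cutlass::tfloat32_t conversions from include/cutlass/tfloat32.h):

      float a       = ...;                              // original F32 operand
      float a_big   = float(cutlass::tfloat32_t(a));    // a rounded to TF32 precision
      float a_small = a - a_big;                        // residual; this is what becomes the "small" TF32 value

    The kernel accumulates a_big x b_big + a_big x b_small + a_small x b_big in F32,
    which recovers most of the accuracy lost by a single TF32 product while still
    running on the TF32 tensor core path.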
*/ #include <iostream> #include <vector> #include <limits> #include "cutlass/blas3.h" #include "cutlass/gemm/device/symm.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/host/symm.h" #include "cutlass/util/reference/host/tensor_reduce.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_norm.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/error_metrics.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" #if CUTLASS_ENABLE_CUBLAS #include <cublas_v2.h> #endif /////////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; cutlass::gemm::GemmCoord problem_size; float alpha; float beta; std::string rand_mode; int seed; Options(): help(false), problem_size({4096, 4096, 4096}), seed(1), alpha(1), beta(), rand_mode("uniform") { } bool valid() { // // CUTLASS attempts to load 128b vectors of F32 elements. Consequently, // all pointers, strides, and tensor extents must be divisible by 4 elements. // int const kAlignment = 4; if ((problem_size.m() % kAlignment) || (problem_size.n() % kAlignment) || (problem_size.k() % kAlignment)) { // misaligned tensors return false; } return true; } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } cmd.get_cmd_line_argument("m", problem_size.m()); cmd.get_cmd_line_argument("n", problem_size.n()); // Since the kernels in this example are in Left Side Mode cmd.get_cmd_line_argument("m", problem_size.k()); cmd.get_cmd_line_argument("alpha", alpha); cmd.get_cmd_line_argument("beta", beta); cmd.get_cmd_line_argument("seed", seed); cmd.get_cmd_line_argument("rand_mode", rand_mode); } /// Prints the usage statement. std::ostream & print_usage(std::ostream &out) const { out << "33_ampere_3xtf32_tensorop_symm example\n\n" << " This example uses the CUTLASS Library to execute 3xTF32 tensorop SYMM computations.\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --m=<int> SYMM M dimension\n" << " --n=<int> SYMM N dimension\n" << " --alpha=<f32> Epilogue scalar alpha\n" << " --beta=<f32> Epilogue scalar beta\n\n" << " --rand_mode=<string> gauss / uniform*\n\n" << " --seed=<int> Random number seed (1*)\n\n"; out << "\n\nExamples:\n\n" << "$ ./examples/33_ampere_3xtf32_tensorop_symm/33_ampere_3xtf32_tensorop_symm --m=1024 --n=512 \\\n" << " --alpha=2 --beta=1 \n\n"; return out; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// // The code section below describes matrix layout of input and output matrices. 
Column Major for // Matrix A, Matrix B and Matrix C (since that's what cuBLAS supports, CUTLASS supports Row Major too) using LayoutInputA = cutlass::layout::ColumnMajor; using LayoutInputB = cutlass::layout::ColumnMajor; using LayoutOutput = cutlass::layout::ColumnMajor; // Symmetric Matrix A is in Left Side mode constexpr cutlass::SideMode SideModeA = cutlass::SideMode::kLeft; // Symmetric Matrix A is in Lower Filled mode constexpr cutlass::FillMode FillModeA = cutlass::FillMode::kLower; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 64, 16>; // <- threadblock tile M = 128, N = 128, K = 16 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 32, 16>; // <- warp tile M = 64, N = 64, K = 16 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = 16, N = 8, K = 8 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ?? // This code section describes the epilogue part of the kernel using EpilogueOp = cutlass::epilogue::thread::LinearCombination< float, // <- data type of output matrix 128 / cutlass::sizeof_bits<float>::value, // <- the number of elements per vectorized // memory access. For a byte, it's 16 // elements. This becomes the vector width of // math instructions in the epilogue too float, // <- data type of accumulator float>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 3; // Alignment constexpr int Alignment = 4; // // CUTLASS Symm Operators (SSYM: Symm_3xTF32, Symm_1xTF32, DSYMM: Symm_F64) // // Symm_3xTF32 using Symm_3xTF32 = cutlass::gemm::device::Symm< float, LayoutInputA, SideModeA, FillModeA, float, LayoutInputB, float, LayoutOutput, float, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages, 1, // Symmetric matrix is always align 1 Alignment, false, cutlass::arch::OpMultiplyAddFastF32>; // Symm_1xTF32 using Symm_1xTF32 = cutlass::gemm::device::Symm< float, LayoutInputA, SideModeA, FillModeA, float, LayoutInputB, float, LayoutOutput, float, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages, 1, // Symmetric matrix is always align 1 Alignment, false, cutlass::arch::OpMultiplyAdd>; // Symm_F64 using Symm_F64 = cutlass::gemm::device::Symm< double, LayoutInputA, SideModeA, FillModeA, double, LayoutInputB, double, LayoutOutput, double, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<32, 32, 16>, cutlass::gemm::GemmShape<16, 16, 16>, cutlass::gemm::GemmShape<8, 8, 4>, cutlass::epilogue::thread::LinearCombination< double, 1, double, double >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 4>; bool run(Options &options) { // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size = options.problem_size; //////////////////////////////////////////////////////////////////////////////// /// 1. 
Initialize F32 Precision input tensors using CUTLASS helper functions //////////////////////////////////////////////////////////////////////////////// cutlass::HostTensor<float, LayoutInputA> tensor_a_F32(problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<float, LayoutInputB> tensor_b_F32(problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<float, LayoutOutput> tensor_c_F32(problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<float, LayoutOutput> tensor_d_F32(problem_size.mn()); // <- Create matrix D with dimensions M x N if (options.rand_mode == "uniform") { const float min = -1; const float max = 1; // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a_F32.host_view(), options.seed, double(max), double(min)); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b_F32.host_view(), options.seed, double(max), double(min)); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c_F32.host_view(), options.seed, double(max), double(min)); // <- Fill matrix C on host with uniform-distribution random data } else if (options.rand_mode == "gauss") { // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomGaussian( tensor_a_F32.host_view(), options.seed, double(0), double(5)); // <- Fill matrix A on host with gaussian-distribution random data cutlass::reference::host::TensorFillRandomGaussian( tensor_b_F32.host_view(), options.seed, double(0), double(5)); // <- Fill matrix B on host with gaussian-distribution random data cutlass::reference::host::TensorFillRandomGaussian( tensor_c_F32.host_view(), options.seed, double(0), double(5)); // <- Fill matrix C on host with gaussian-distribution random data } cutlass::reference::host::TensorFill( tensor_d_F32.host_view()); // <- fill matrix D on host with zeros // Copy data from host to GPU tensor_a_F32.sync_device(); tensor_b_F32.sync_device(); tensor_c_F32.sync_device(); tensor_d_F32.sync_device(); //////////////////////////////////////////////////////////////////////////////// /// 2. 
Initialize F64 tensors, Output tensors and setup arguments //////////////////////////////////////////////////////////////////////////////// // Symm F64 input operands (A, B, C) cutlass::HostTensor<double, LayoutInputA> tensor_a_F64(problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<double, LayoutInputB> tensor_b_F64(problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<double, LayoutOutput> tensor_c_F64(problem_size.mn()); // <- Create matrix C with dimensions M x N // Symm output (D) for SYMM_3xTF32 cutlass::HostTensor<float, LayoutOutput> tensor_d_3xTF32(problem_size.mn()); // <- Create matrix D with dimensions M x N // Symm output (D) for SYMM_1xTF32 cutlass::HostTensor<float, LayoutOutput> tensor_d_1xTF32(problem_size.mn()); // <- Create matrix D with dimensions M x N // Symm output (D) for SYMM_F64 cutlass::HostTensor<double, LayoutOutput> tensor_d_F64(problem_size.mn()); // <- Create matrix D with dimensions M x N #if CUTLASS_ENABLE_CUBLAS // Symm output (D) for SYMM_cublasF32 cutlass::HostTensor<float, LayoutOutput> tensor_d_cublasF32(problem_size.mn()); // <- Create matrix D with dimensions M x N #endif // Copy values from the DP tensors cutlass::reference::host::TensorCopy(tensor_a_F64.host_view(), tensor_a_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_b_F64.host_view(), tensor_b_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_c_F64.host_view(), tensor_c_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_d_F64.host_view(), tensor_d_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_d_3xTF32.host_view(), tensor_d_F32.host_view()); cutlass::reference::host::TensorCopy(tensor_d_1xTF32.host_view(), tensor_d_F32.host_view()); #if CUTLASS_ENABLE_CUBLAS cutlass::reference::host::TensorCopy(tensor_d_cublasF32.host_view(), tensor_d_F32.host_view()); #endif // Copy data from host to GPU tensor_a_F64.sync_device(); tensor_b_F64.sync_device(); tensor_c_F64.sync_device(); tensor_d_F64.sync_device(); tensor_d_3xTF32.sync_device(); tensor_d_1xTF32.sync_device(); #if CUTLASS_ENABLE_CUBLAS tensor_d_cublasF32.sync_device(); #endif // Initialize alpha and beta for dot product computation float alpha = float(options.alpha); float beta = float(options.beta); // Batch count as 1 int batch_count = 1; // Batch stride for A, when matrix A is in Left Side mode int batch_stride_A = problem_size.m()*problem_size.m(); //////////////////////////////////////////////////////////////////////////////// /// 3. Run 3xTF32 kernel //////////////////////////////////////////////////////////////////////////////// // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch // instantiated CUTLASS kernel typename Symm_3xTF32::Arguments arguments_3xtf32{ cutlass::gemm::GemmUniversalMode::kGemm, problem_size, // <- problem size of matrix multiplication batch_count, // <- batch count {alpha, beta}, // <- tuple of alpha and beta tensor_a_F32.device_data(), // <- reference to matrix A on device tensor_b_F32.device_data(), // <- reference to matrix B on device tensor_c_F32.device_data(), // <- reference to matrix C on device tensor_d_3xTF32.device_data(), // <- reference to matrix D on device batch_stride_A, // <- batch stride and ld for matrices problem_size.m() * problem_size.n(), problem_size.m() * problem_size.n(), problem_size.m() * problem_size.n(), tensor_a_F32.layout().stride(0), tensor_b_F32.layout().stride(0), tensor_c_F32.layout().stride(0), tensor_d_3xTF32.layout().stride(0) }; // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size_3xtf32 = Symm_3xTF32::get_workspace_size(arguments_3xtf32); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace_3xtf32(workspace_size_3xtf32); // Instantiate CUTLASS kernel depending on templates Symm_3xTF32 symm_op_3xtf32; // Check the problem size is supported or not cutlass::Status status_3xtf32 = symm_op_3xtf32.can_implement(arguments_3xtf32); CUTLASS_CHECK(status_3xtf32); // Initialize CUTLASS kernel with arguments and workspace pointer status_3xtf32 = symm_op_3xtf32.initialize(arguments_3xtf32, workspace_3xtf32.get()); CUTLASS_CHECK(status_3xtf32); // Launch initialized CUTLASS kernel status_3xtf32 = symm_op_3xtf32(); CUTLASS_CHECK(status_3xtf32); tensor_d_3xTF32.sync_host(); //////////////////////////////////////////////////////////////////////////////// /// 4. Run 1xTF32 kernel //////////////////////////////////////////////////////////////////////////////// // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch // instantiated CUTLASS kernel typename Symm_1xTF32::Arguments arguments_1xtf32{ cutlass::gemm::GemmUniversalMode::kGemm, problem_size, // <- problem size of matrix multiplication batch_count, // <- batch count {alpha, beta}, // <- tuple of alpha and beta tensor_a_F32.device_data(), // <- reference to matrix A on device tensor_b_F32.device_data(), // <- reference to matrix B on device tensor_c_F32.device_data(), // <- reference to matrix C on device tensor_d_1xTF32.device_data(), // <- reference to matrix D on device batch_stride_A, // <- batch stride and ld for matrices problem_size.m() * problem_size.n(), problem_size.m() * problem_size.n(), problem_size.m() * problem_size.n(), tensor_a_F32.layout().stride(0), tensor_b_F32.layout().stride(0), tensor_c_F32.layout().stride(0), tensor_d_1xTF32.layout().stride(0) }; // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size_1xtf32 = Symm_1xTF32::get_workspace_size(arguments_1xtf32); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace_1xtf32(workspace_size_1xtf32); // Instantiate CUTLASS kernel depending on templates Symm_1xTF32 symm_op_1xtf32; // Check the problem size is supported or not cutlass::Status status_1xtf32 = symm_op_1xtf32.can_implement(arguments_1xtf32); CUTLASS_CHECK(status_1xtf32); // Initialize CUTLASS kernel with arguments and workspace pointer status_1xtf32 = symm_op_1xtf32.initialize(arguments_1xtf32, workspace_1xtf32.get()); CUTLASS_CHECK(status_1xtf32); // Launch initialized CUTLASS kernel status_1xtf32 = symm_op_1xtf32(); CUTLASS_CHECK(status_1xtf32); tensor_d_1xTF32.sync_host(); //////////////////////////////////////////////////////////////////////////////// /// 5. Run F64 kernel //////////////////////////////////////////////////////////////////////////////// // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch
  // instantiated CUTLASS kernel
  typename Symm_F64::Arguments arguments_f64{
    cutlass::gemm::GemmUniversalMode::kGemm,
    problem_size,                                     // <- problem size of matrix multiplication
    batch_count,                                      // <- batch count
    {double(options.alpha), double(options.beta)},    // <- tuple of alpha and beta
    tensor_a_F64.device_data(),                       // <- reference to matrix A on device
    tensor_b_F64.device_data(),                       // <- reference to matrix B on device
    tensor_c_F64.device_data(),                       // <- reference to matrix C on device
    tensor_d_F64.device_data(),                       // <- reference to matrix D on device
    batch_stride_A,                                   // <- batch stride and ld for matrices
    problem_size.m() * problem_size.n(),
    problem_size.m() * problem_size.n(),
    problem_size.m() * problem_size.n(),
    tensor_a_F64.layout().stride(0),
    tensor_b_F64.layout().stride(0),
    tensor_c_F64.layout().stride(0),
    tensor_d_F64.layout().stride(0)
  };

  // Using the arguments, query for extra workspace required for matrix multiplication computation
  size_t workspace_size_f64 = Symm_F64::get_workspace_size(arguments_f64);

  // Allocate workspace memory
  cutlass::device_memory::allocation<uint8_t> workspace_f64(workspace_size_f64);

  // Instantiate CUTLASS kernel depending on templates
  Symm_F64 symm_op_f64;

  // Check the problem size is supported or not
  cutlass::Status status_f64 = symm_op_f64.can_implement(arguments_f64);
  CUTLASS_CHECK(status_f64);

  // Initialize CUTLASS kernel with arguments and workspace pointer
  status_f64 = symm_op_f64.initialize(arguments_f64, workspace_f64.get());
  CUTLASS_CHECK(status_f64);

  // Launch initialized CUTLASS kernel
  status_f64 = symm_op_f64();
  CUTLASS_CHECK(status_f64);

  cudaDeviceSynchronize();

  tensor_d_F64.sync_host();

  ////////////////////////////////////////////////////////////////////////////////
  /// 6. Run cuBLAS SSYMM kernel
  ////////////////////////////////////////////////////////////////////////////////

#if CUTLASS_ENABLE_CUBLAS
  cublasStatus_t cublas_status;
  cublasHandle_t handle;

  cublas_status = cublasCreate(&handle);
  if (cublas_status != CUBLAS_STATUS_SUCCESS) {
    std::cerr << "Failed to create cuBLAS handle." << std::endl;
    return false;
  }

  cublas_status = cublasSsymm(
      handle,
      CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER,
      problem_size.m(), problem_size.n(),
      static_cast<const float*>(&alpha),
      static_cast<const float*>(tensor_a_F32.device_data()), int(tensor_a_F32.layout().stride(0)),
      static_cast<const float*>(tensor_b_F32.device_data()), int(tensor_b_F32.layout().stride(0)),
      static_cast<const float*>(&beta),
      static_cast<float*>(tensor_d_cublasF32.device_data()), int(tensor_d_cublasF32.layout().stride(0))
  );

  cudaDeviceSynchronize();

  tensor_d_cublasF32.sync_host();
#endif

  ////////////////////////////////////////////////////////////////////////////////
  /// 7.
Compute l2 norms //////////////////////////////////////////////////////////////////////////////// #if CUTLASS_ENABLE_CUBLAS // l2 norm cuBLAS F32 vs F64 cutlass::HostTensor<double, LayoutOutput> tensor_d_cublasF32_in_F64(problem_size.mn()); cutlass::reference::host::TensorCopy(tensor_d_cublasF32_in_F64.host_view(), tensor_d_cublasF32.host_view()); double l2_norm_cublasf32_vs_f64 = cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_cublasF32_in_F64.host_view(), tensor_d_F64.host_view()); #endif // l2 norm 3xTF32 vs F64 cutlass::HostTensor<double, LayoutOutput> tensor_d_3xTF32_in_F64(problem_size.mn()); cutlass::reference::host::TensorCopy(tensor_d_3xTF32_in_F64.host_view(), tensor_d_3xTF32.host_view()); double l2_norm_3xtf32_vs_f64 = cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_3xTF32_in_F64.host_view(), tensor_d_F64.host_view()); // l2 norm 1xTF32 vs F64 cutlass::HostTensor<double, LayoutOutput> tensor_d_1xTF32_in_F64(problem_size.mn()); cutlass::reference::host::TensorCopy(tensor_d_1xTF32_in_F64.host_view(), tensor_d_1xTF32.host_view()); double l2_norm_1xtf32_vs_f64 = cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_1xTF32_in_F64.host_view(), tensor_d_F64.host_view()); #if CUTLASS_ENABLE_CUBLAS // l2 norm 3xTF32 vs cuBLAS F32 double l2_norm_3xtf32_vs_cublasf32 = cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_3xTF32.host_view(), tensor_d_cublasF32.host_view()); #endif // l2 norm 3xTF32 vs 1xTF32 double l2_norm_3xtf32_vs_1xtf32 = cutlass::reference::host::TensorRelativeErrorMetric( tensor_d_3xTF32.host_view(), tensor_d_1xTF32.host_view()); /////////////////////////////////////////////////////////////////////////////// // Print kernel info and L2 norms std::cout << "Problem Size: (" << problem_size.m() << "," << problem_size.n() << "," << problem_size.k() << ") " << "Alpha: " << alpha << "," << " Beta: " << beta << std::endl; std::cout << std::fixed; std::cout << "Normalized L2 norm of" << std::endl; std::cout.precision(8); std::cout << std::scientific #if CUTLASS_ENABLE_CUBLAS << " - cuBLAS F32 error with F64 reference : " << l2_norm_cublasf32_vs_f64 << std::endl #endif << " - 3xTF32 error with F64 reference : " << l2_norm_3xtf32_vs_f64 << std::endl << " - 1xTF32 error with F64 reference : " << l2_norm_1xtf32_vs_f64 << std::endl #if CUTLASS_ENABLE_CUBLAS << " - 3xTF32 error with cuBLAS F32 reference : " << l2_norm_3xtf32_vs_cublasf32 << std::endl #endif << " - 3xTF32 error with 1xTF32 reference : " << l2_norm_3xtf32_vs_1xtf32 << std::endl; return true; } int main(int argc, const char **argv) { bool notSupported = false; // Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ >= 11)) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (!((props.major * 10 + props.minor) >= 80)) { std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { // Returning zero so this test passes on older Toolkits. Its actions are no-op. 
return 0; } Options options; options.parse(argc, argv); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } bool result = true; if (!options.valid()) { std::cerr << "Invalid problem." << std::endl; return -1; } result = run(options); if (!result) return -1; return 0; }
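////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch: a minimal host-side model of the 3xTF32 decomposition the
// kernels above rely on. Each FP32 operand is split into a "big" TF32 part and a
// "small" TF32 residual, and the product is approximated by three lower-precision
// multiplies accumulated in FP32. The tf32_truncate() helper is an assumed
// stand-in (it simply drops the low 13 mantissa bits); it is not the
// CUTLASS/NVIDIA conversion routine, which applies proper rounding.

#include <cstdint>
#include <cstring>

static float tf32_truncate(float x) {
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits &= ~uint32_t((1u << 13) - 1);   // keep 10 of FP32's 23 mantissa bits
  float y;
  std::memcpy(&y, &bits, sizeof(y));
  return y;
}

// Approximates a * b with three TF32-precision products; the small*small term is
// dropped, which is what makes 3xTF32 cheaper than full FP32 emulation while
// staying far more accurate than a single TF32 multiply.
static float mul_3xtf32(float a, float b) {
  float a_big   = tf32_truncate(a);
  float a_small = tf32_truncate(a - a_big);
  float b_big   = tf32_truncate(b);
  float b_small = tf32_truncate(b - b_big);
  return a_big * b_big + a_big * b_small + a_small * b_big;
}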
cutlass/examples/33_ampere_3xtf32_tensorop_symm/ampere_3xtf32_tensorop_symm.cu/0
{ "file_path": "cutlass/examples/33_ampere_3xtf32_tensorop_symm/ampere_3xtf32_tensorop_symm.cu", "repo_id": "cutlass", "token_count": 14585 }
7
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief GEMM Permute Example. This example computes batched GEMM operations with output results permuted as reshaped tensors. We provide layout plugin as a flexible tool for users to add any customized input/output tensor permute operation, or any other generalized global memory writeout address computation. To add a customized layout, add new class in include/cutlass/layout/permute.h In this example we use several permute operations (permute([0, 2, 1, 3])) In this example, we used Tensor4DPermuteBMM0213 layout to perform Batched GEMM with permute([0, 2, 1, 3]) on BMM whole output tensor, and used Tensor5DPermute20314 layout to perform Normal GEMM with permute([2, 0, 3, 1, 4]) on output matrix. The address computations are performed in compute(col_init, row_init, stride_init, BMM_batch_idx) with {col_permute, row_permute and stride_permute} as new addresses after permute op. (check include/cutlass/layout/permute.h) Tips: 1) Make sure to set batch_stride to zero for BMM permute; also the BMM GEMM should be in mode cutlass::gemm::GemmUniversalMode::kBatched instead of kArray. 2) When the contiguous dimension is touched in permute op (for example [0, 2, 3, 1] for row-major matrix or [1, 0, 2, 3] for column-major), Alignment should be set to 1 for the corresponding matrix. If the last dimension is untouched, one can set Alignment to be larger like 8 in our example. As a result, permute op without touching the unit stride dimension is recommended to obtain the best performance. 
Examples:

      # Runs a batched GEMM with 96 batches
      $ ./examples/39_gemm_permute/39_gemm_permute --batch-count=96

      # Runs a batched GEMM with 96 batches (with GEMM-K dimension equal to 1024)
      $ ./examples/39_gemm_permute/39_gemm_permute --batch-count=96 --k=1024 --verbose=true

      # Execute batched GEMM and profile with NSight
      $ nv-nsight-cu-cli ./examples/39_gemm_permute/39_gemm_permute --m=256 --n=192 --k=256 --verbose=true --iterations=1 --reference-check=false
*/

/////////////////////////////////////////////////////////////////////////////////////////////////

#include <iostream>
#include <fstream>
#include <sstream>
#include <vector>
#include <map>
#include <unordered_map>

#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/device/gemm_universal.h"

#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm_complex.h"
#include "cutlass/util/reference/device/gemm_complex.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"

#include "cutlass/layout/permute.h"

#include "layouts.h"
#include "permute_info.h"

/// Tensor4DPermuteBMM0213 --->
/// Permute layout function for 4-D permuted tensors for BMM with BMM tensor (dimension as [B, M, N]) reshaped
/// as [B/D1, D1, M, N]. Then perform permute([0, 2, 1, 3]) on the corresponding whole BMM tensor.
int constexpr D1 = 12;

/// Tensor5DPermute20314 --->
/// Permute layout function for 5-D permuted tensors with matrix (dimension as [M, N]) reshaped
/// as [M/T1, T1, T2, T3, N/T2/T3]. Then perform permute([2, 0, 3, 1, 4]) on the corresponding tensor.
int constexpr T1 = 16;
int constexpr T2 = 3;
int constexpr T3 = 8;

/// Tensor4DPermute0213 --->
/// Permute layout function for 4-D permuted tensors with matrix (dimension as [M, N]) reshaped
/// as [M/S1, S1, S2, N/S2]. Then perform permute([0, 2, 1, 3]) on the corresponding tensor.
int constexpr S1 = 8; int constexpr S2 = 4; // // // Alignments int constexpr AlignmentA = 8; int constexpr AlignmentB = 8; int constexpr AlignmentC = 8; /// GEMM element types using ElementInput = cutlass::half_t; using ElementOutput = cutlass::half_t; using ElementAccumulator = float; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Useful macros #define CHECK_CUDA_CALL(call, handler) \ do { \ cudaError_t __err = (call); \ if (__err != cudaSuccess) { \ std::cerr << #call " failed: " << cudaGetErrorString(__err) << std::endl; \ handler; \ } \ } while(0) #define CHECK_CUTLASS_CALL(call, handler) \ do { \ cutlass::Status __status = (call); \ if (__status != cutlass::Status::kSuccess) { \ std::cerr << #call " failed: " << cutlass::cutlassGetStatusString(__status) << std::endl; \ handler; \ } \ } while(0) ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; bool error; bool reference_check; cutlass::gemm::GemmCoord problem_each; int batch_count; int iterations; int cuda_streams; bool verbose; float alpha; float beta; // // Methods // Options(): help(false), error(false), reference_check(true), batch_count(-1), iterations(20), cuda_streams(0), verbose(false), alpha(1), beta() { } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; return; } cmd.get_cmd_line_argument("alpha", alpha, 1.0f); cmd.get_cmd_line_argument("beta", beta, 0.0f); cmd.get_cmd_line_argument("iterations", iterations, 20); cmd.get_cmd_line_argument("streams", cuda_streams, 0); cmd.get_cmd_line_argument("verbose", verbose, false); cmd.get_cmd_line_argument("reference-check", reference_check, true); int m, n, k; cmd.get_cmd_line_argument("m", m, 384); cmd.get_cmd_line_argument("n", n, 192); cmd.get_cmd_line_argument("k", k, 384); cmd.get_cmd_line_argument("batch-count", batch_count, 96); problem_each = cutlass::gemm::GemmCoord(m, n, k); } /// Prints the usage statement. std::ostream & print_usage(std::ostream &out) const { out << "39_gemm_permute\n" "\n" " This example tests and profiles the performance of normal GEMM and batched GEMM with different" " combinations of fused permutations of input and output tensors." "\n" " Permutations considered in this example:\n" "\n" " Normal GEMM:\n" " 1) Tensor4DPermute0213: matrix of shape [X, Y] is reshaped as [X/S1, S1, S2, Y/S2] and has its dimensions" " permuted as [0, 2, 1, 3], resulting in shape [X/S1, S2, S1, Y/S2] viewed as matrix of shape [X*S2/S1, Y*S1/S2].\n" " 2) Tensor5DPermute20314: matrix of shape [X, Y] is reshaped as [X/T1, T1, T2, T3, Y/T2/T3] and has its dimensions" " permuted as [2, 0, 3, 1, 4], resulting in shape [T2, X/T1, T3, T1, Y/T2/T3] viewed as matrix of shape [X*T2/T1, Y*T1/T2].\n" "\n" " Batched GEMM:\n" " 3) Tensor4DPermuteBMM0213: batched tensor of 3D shape [B, X, Y] is reshaped as 4D shape [B/D1, D1, X, Y]" " and has its dimensions permuted as [0, 2, 1, 3], resulting in shape [B/D1, X, D1, Y] viewed as" " a matrix of shape [B/D1, X, Y*D1] for batched GEMM purposes.\n" "\n" " Note: S1, S2, D1, D2, T1, T2, T3 are compile-time constants defined in gemm_permute.cu." " Runtime specification of these values is not supported." 
" These values along with alignment requirements place constraints on supported matrix sizes.\n" "\n" " Note: X, Y above may refer to M, N or K dimensions of GEMM problem, depending on the tensor considered (A, B or D)." " For the output tensor D the values correspond directly to dimensions of D, whereas for A and B the original dimensions" " X', Y' are inferred from the ones supplied to the GEMM, taking into account the permute operation.\n" "\n" "Options:\n" "\n" " --help If specified, displays this usage statement.\n\n" " --batch-count=<int> Sets the number of batches in batched GEMM (batch number for BMM). (default: --batch-count=768)\n" " --m=<int> Sets the M dimension for both batched GEMM and normal GEMM problems. (default: --m=128)\n" " --n=<int> Sets the N dimension for both batched GEMM and normal GEMM problems. (default: --n=192)\n" " --k=<int> Sets the K dimension for both batched GEMM and normal GEMM problems. (default: --k=384)\n" " --alpha=<f32> Epilogue scalar alpha (real part)\n" " --beta=<f32> Epilogue scalar beta (real part)\n\n" " --iterations=<int> Number of profiling iterations to perform.\n" " --reference-check=<bool> If true, performs reference check.\n" " --verbose=<bool> If true, prints problem sizes and batching structure.\n" "\n" "Examples:\n" "\n" "# Runs a batched GEMM with 96 batches\n" "$ ./examples/39_gemm_permute/39_gemm_permute --batch-count=96\n" "\n" "# Runs a batched GEMM with 96 batches (with GEMM-K dimension equal to 1024)\n" "$ ./examples/39_gemm_permute/39_gemm_permute --batch-count=96 --k=1024 --verbose=true\n" "\n" "# Execute batched GEMM and profile with NSight\n" "$ nv-nsight-cu-cli ./examples/39_gemm_permute/39_gemm_permute --m=256 --n=192 --k=256 --verbose=true --iterations=1 --reference-check=false\n" "\n"; return out; } /// Compute performance in GFLOP/s double gflops(double runtime_s, bool batched) const { // Number of real-valued multiply-adds int64_t fmas = int64_t(); fmas += problem_each.product() * (batched ? 
batch_count : 1); // Two flops per multiply-add return 2.0 * double(fmas) / double(1.0e9) / runtime_s; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// namespace { // (anonymous) /// Dimension-generic permutation loop template<int I, typename Element, typename Layout, typename PermuteOp, typename Coord> void permute_host_impl( cutlass::TensorView<Element const, Layout> const & input, cutlass::TensorView<Element, Layout> const & output, PermuteOp && permute, Coord & coord ) { static_assert(Layout::kRank == Coord::kRank, "Incompatible Layout and Coord types"); if constexpr (I == Coord::kRank) { output.at(permute(coord)) = input.at(coord); } else { for (coord[I] = 0; coord[I] < input.extent(I); ++coord[I]) { permute_host_impl<I+1>(input, output, std::forward<PermuteOp>(permute), coord); } } } } // namespace (anonymous) /// Perform a reference (host-based) permutation of an input tensor template<typename PermuteLayout, typename Element, typename Layout> void permute_host( cutlass::TensorView<Element const, Layout> const &input, cutlass::TensorView<Element, Layout> const &output, int batch_count) { Layout layout = input.layout(); cutlass::MatrixCoord extent = input.extent(); std::size_t num_elems = layout.capacity(extent) * batch_count; std::vector<Element> h_input(num_elems); cutlass::device_memory::copy_to_host(h_input.data(), input.data(), num_elems); std::vector<Element> h_output(num_elems); using Info = PermuteInfo<PermuteLayout>; using TensorLayout = typename Info::Layout; auto shape_orig = Info::original_shape(extent, batch_count); auto shape_perm = Info::permute(shape_orig); cutlass::TensorView<Element const, TensorLayout> view_input(h_input.data(), TensorLayout::packed(shape_orig), shape_orig); cutlass::TensorView<Element, TensorLayout> view_output(h_output.data(), TensorLayout::packed(shape_perm), shape_perm); decltype(shape_orig) coord; permute_host_impl<0>(view_input, view_output, Info::permute, coord); cutlass::device_memory::copy_to_device(output.data(), h_output.data(), num_elems); } template<typename Layout> struct LayoutInfo; template<> struct LayoutInfo<cutlass::layout::RowMajor> { static std::string name() { return "RowMajor"; } }; template<> struct LayoutInfo<cutlass::layout::ColumnMajor> { static std::string name() { return "ColumnMajor"; } }; /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename ElementA, typename ElementB, typename ElementC> class Testbed { private: // // Data members // Options & options; /// Initialization cutlass::Distribution::Kind init_A; cutlass::Distribution::Kind init_B; cutlass::Distribution::Kind init_C; uint32_t seed; cutlass::DeviceAllocation<ElementA> block_A; cutlass::DeviceAllocation<ElementB> block_B; cutlass::DeviceAllocation<ElementC> block_C; cutlass::DeviceAllocation<ElementC> block_D; public: // // Methods // Testbed( Options &options_, cutlass::Distribution::Kind init_A_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_B_ = cutlass::Distribution::Uniform, cutlass::Distribution::Kind init_C_ = cutlass::Distribution::Uniform, uint32_t seed_ = 3090 ): options(options_), init_A(init_A_), init_B(init_B_), init_C(init_C_), seed(seed_) { } private: /// Print permutation info for one tensor template<typename PermuteLayout> void print_tensor_info( std::ostream & os, std::string const &tensor_name, int row_dim, int col_dim) { cutlass::MatrixCoord extent(options.problem_each.at(row_dim), 
options.problem_each.at(col_dim)); using Info = PermuteInfo<PermuteLayout>; os << "tensor " << tensor_name << ": " << Info::desc() << "\n"; os << " extent: [" << extent.row() << ", " << extent.column() << "]"; if (Info::kBatched) { os << ", batch count: " << options.batch_count; } os << "\n"; if (!cutlass::layout::is_trivial_permute<PermuteLayout>) { auto shape_orig = Info::original_shape(extent, options.batch_count); auto shape_perm = Info::permute(shape_orig); os << " original: [" << shape_orig << "]\n"; os << " permuted: [" << shape_perm << "]\n"; } } /// Check shape compatibility for one tensor template<typename Layout, typename PermuteLayout, int Alignment> bool check_tensor_shape( std::string const &tensor_name, int row_dim, int col_dim) { cutlass::MatrixCoord extent(options.problem_each.at(row_dim), options.problem_each.at(col_dim)); using Info = PermuteInfo<PermuteLayout>; auto rowAlign = cutlass::platform::is_same<Layout, cutlass::layout::ColumnMajor>::value ? Alignment : 1; auto colAlign = cutlass::platform::is_same<Layout, cutlass::layout::RowMajor>::value ? Alignment : 1; auto rowFactor = Info::kRowFactor * rowAlign; auto colFactor = Info::kColumnFactor * colAlign; // Assumes row-major layout bool const valid_row = extent.row() % rowFactor == 0; if (!valid_row) { std::cerr << "Invalid tensor " << tensor_name << " row size = " << extent.row() << ", " "must be divisible by " << rowFactor << ", " "required by " << Info::name() << (rowAlign > 1 ? (" and alignment of " + std::to_string(rowAlign)) : "") << std::endl; } bool const valid_col = extent.column() % colFactor == 0; if (!valid_col) { std::cerr << "Invalid tensor " << tensor_name << " column size = " << extent.column() << ", " "must be divisible by " << colFactor << ", " "required by " << Info::name() << (colAlign > 1 ? 
(" and alignment of " + std::to_string(colAlign)) : "") << std::endl; } bool const valid_bsz = options.batch_count % Info::kBatchFactor == 0; if (!valid_bsz) { std::cerr << "Invalid batch count = " << options.batch_count << ", " "must be divisible by " << Info::kBatchFactor << ", " "required by " << Info::name() << std::endl; } return valid_row && valid_col && valid_bsz; } /// Helper to initialize a tensor view template <typename Element> void initialize_tensor_( Element *ptr, size_t capacity, cutlass::Distribution::Kind dist_kind, uint32_t seed) { if (dist_kind == cutlass::Distribution::Uniform) { Element scope_max, scope_min; int bits_input = cutlass::sizeof_bits<Element>::value; int bits_output = cutlass::sizeof_bits<ElementC>::value; if (bits_input == 1) { scope_max = 2; scope_min = 0; } else if (bits_input <= 8) { scope_max = 2; scope_min = -2; } else if (bits_output == 16) { if (cutlass::sizeof_bits<ElementAccumulator>::value <= 16) { scope_max = 5; scope_min = -5; } else { scope_max = 8; scope_min = -8; } } else { scope_max = 8; scope_min = -8; } cutlass::reference::device::BlockFillRandomUniform( ptr, capacity, seed, scope_max, scope_min, 0); } else if (dist_kind == cutlass::Distribution::Gaussian) { cutlass::reference::device::BlockFillRandomGaussian( ptr, capacity, seed, Element(), Element(0.5f)); } else if (dist_kind == cutlass::Distribution::Sequential) { // Fill with increasing elements cutlass::reference::device::BlockFillSequential( ptr, capacity, Element(1), Element()); } else { // Fill with all 1s cutlass::reference::device::BlockFillSequential( ptr, capacity, Element(), Element(1)); } } /// Initializes data structures void initialize(int batch_count) { srand(seed); int64_t total_elements_A = options.problem_each.m() * options.problem_each.k() * batch_count; int64_t total_elements_B = options.problem_each.n() * options.problem_each.k() * batch_count; int64_t total_elements_C = options.problem_each.m() * options.problem_each.n() * batch_count; int64_t total_elements_D = options.problem_each.m() * options.problem_each.n() * batch_count; // Allocate space block_A.reset(total_elements_A); block_B.reset(total_elements_B); block_C.reset(total_elements_C); block_D.reset(total_elements_D); // Initialize input tensors initialize_tensor_(block_A.get(), total_elements_A, init_A, seed * 2021); initialize_tensor_(block_B.get(), total_elements_B, init_B, seed * 2022); initialize_tensor_(block_C.get(), total_elements_C, init_C, seed * 2023); cutlass::reference::device::BlockFillSequential( block_D.get(), total_elements_D, ElementC(), ElementC()); } /// Check device GEMM results against a reference implementation with separate host-based permutation template<typename Gemm> bool validate(Gemm const &gemm) { bool constexpr kBatched = PermuteInfo<typename Gemm::PermuteALayout>::kBatched || PermuteInfo<typename Gemm::PermuteBLayout>::kBatched || PermuteInfo<typename Gemm::PermuteDLayout>::kBatched; int const batch_count = kBatched ? 
options.batch_count : 1; cutlass::gemm::GemmCoord problem = options.problem_each; cutlass::MatrixCoord extent_A{problem.m(), problem.k()}; cutlass::MatrixCoord extent_B{problem.k(), problem.n()}; cutlass::MatrixCoord extent_C{problem.m(), problem.n()}; using LayoutA = typename Gemm::LayoutA; using LayoutB = typename Gemm::LayoutB; using LayoutC = typename Gemm::LayoutC; LayoutA layout_A(LayoutA::packed(extent_A)); LayoutB layout_B(LayoutB::packed(extent_B)); LayoutC layout_C(LayoutC::packed(extent_C)); auto size_A = layout_A.capacity(extent_A) * batch_count; auto size_B = layout_B.capacity(extent_B) * batch_count; auto size_C = layout_C.capacity(extent_C) * batch_count; cutlass::TensorView<ElementA, LayoutA> view_A(block_A.get(), layout_A, extent_A); cutlass::TensorView<ElementB, LayoutB> view_B(block_B.get(), layout_B, extent_B); cutlass::TensorView<ElementC, LayoutC> view_C(block_C.get(), layout_C, extent_C); cutlass::TensorView<ElementC, LayoutC> view_D(block_D.get(), layout_C, extent_C); cutlass::DeviceAllocation<ElementA> block_A_perm(size_A); cutlass::DeviceAllocation<ElementA> block_B_perm(size_B); cutlass::TensorView<ElementA, LayoutA> view_A_perm(block_A_perm.get(), layout_A, extent_A); cutlass::TensorView<ElementB, LayoutB> view_B_perm(block_B_perm.get(), layout_B, extent_B); permute_host<typename Gemm::PermuteALayout>(view_A.const_view(), view_A_perm, batch_count); permute_host<typename Gemm::PermuteBLayout>(view_B.const_view(), view_B_perm, batch_count); cutlass::DeviceAllocation<ElementC> block_D_ref(size_C); cutlass::TensorView<ElementC, LayoutC> view_D_ref(block_D_ref.get(), layout_C, extent_C); using EpilogueOutputOp = typename Gemm::GemmKernel::Epilogue::OutputOp; // Reference GEMM cutlass::reference::device::GemmComplex< ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, typename EpilogueOutputOp::ElementCompute, typename Gemm::ElementAccumulator >( problem, options.alpha, view_A_perm, Gemm::kTransformA, view_B_perm, Gemm::kTransformB, options.beta, view_C, view_D_ref, ElementAccumulator(0), batch_count, options.problem_each.m() * options.problem_each.k(), options.problem_each.n() * options.problem_each.k(), options.problem_each.m() * options.problem_each.n(), options.problem_each.m() * options.problem_each.n() ); cutlass::DeviceAllocation<ElementC> block_D_perm(size_C); cutlass::TensorView<ElementC, LayoutC> view_D_perm(block_D_perm.get(), layout_C, extent_C); permute_host<typename Gemm::PermuteDLayout>(view_D_ref.const_view(), view_D_perm, batch_count); // Reference check return cutlass::reference::device::BlockCompareEqual(view_D_perm.data(), view_D.data(), size_C); } public: template<typename Gemm> bool profile_GEMM_permute() { using LayoutA = typename Gemm::LayoutA; using LayoutB = typename Gemm::LayoutB; using LayoutC = typename Gemm::LayoutC; using PermuteALayout = typename Gemm::PermuteALayout; using PermuteBLayout = typename Gemm::PermuteBLayout; using PermuteDLayout = typename Gemm::PermuteDLayout; bool constexpr kBatched = PermuteInfo<PermuteALayout>::kBatched || PermuteInfo<PermuteBLayout>::kBatched || PermuteInfo<PermuteDLayout>::kBatched; std::cout << "\n" "====================================================\n" << (kBatched ? 
"Batched" : "Normal") << " GEMM:" << "\n A=" << LayoutInfo<LayoutA>::name() << "," << PermuteInfo<PermuteALayout>::name() << "\n B=" << LayoutInfo<LayoutB>::name() << "," << PermuteInfo<PermuteBLayout>::name() << "\n D=" << LayoutInfo<LayoutC>::name() << "," << PermuteInfo<PermuteDLayout>::name() << "\n" "====================================================\n"; if (options.verbose) { print_tensor_info<PermuteALayout>(std::cout, "A", 0, 2); print_tensor_info<PermuteBLayout>(std::cout, "B", 2, 1); print_tensor_info<PermuteDLayout>(std::cout, "D", 0, 1); } std::cout << std::endl; bool valid = true; valid &= check_tensor_shape<LayoutA, PermuteALayout, Gemm::kAlignmentA>("A", 0, 2); valid &= check_tensor_shape<LayoutB, PermuteBLayout, Gemm::kAlignmentB>("B", 2, 1); valid &= check_tensor_shape<LayoutC, PermuteDLayout, Gemm::kAlignmentC>("D", 0, 1); if (!valid) { std::cout << "Skipped test" << std::endl; return true; } int const batch_count = kBatched ? options.batch_count : 1; // Initialize the problem initialize(batch_count); // Configure the GEMM arguments using EpilogueOutputOp = typename Gemm::GemmKernel::Epilogue::OutputOp; typename EpilogueOutputOp::Params epilogue_op(options.alpha, options.beta); // Please make sure all problem_sizes are the same for kBatched mode auto problem = options.problem_each; cutlass::MatrixCoord extent_A{problem.m(), problem.k()}; cutlass::MatrixCoord extent_B{problem.k(), problem.n()}; cutlass::MatrixCoord extent_C{problem.m(), problem.n()}; LayoutA layout_A(LayoutA::packed(extent_A)); LayoutB layout_B(LayoutB::packed(extent_B)); LayoutC layout_C(LayoutC::packed(extent_C)); // Configure GEMM arguments typename Gemm::Arguments arguments{ kBatched ? cutlass::gemm::GemmUniversalMode::kBatched : cutlass::gemm::GemmUniversalMode::kGemm, problem, batch_count, epilogue_op, (void*)block_A.get(), (void*)block_B.get(), (void*)block_C.get(), (void*)block_D.get(), // For any non-trivial permute the batch stride must be set to 0 cutlass::layout::is_trivial_permute<PermuteALayout> ? layout_A.capacity(extent_A) : 0, cutlass::layout::is_trivial_permute<PermuteBLayout> ? layout_B.capacity(extent_B) : 0, layout_C.capacity(extent_C), cutlass::layout::is_trivial_permute<PermuteDLayout> ? layout_C.capacity(extent_C) : 0, layout_A.stride(0), layout_B.stride(0), layout_C.stride(0), layout_C.stride(0), }; // Initialize the GEMM object Gemm gemm_normal; CHECK_CUTLASS_CALL(gemm_normal.initialize(arguments, nullptr), return false); // Run the normal GEMM object CHECK_CUTLASS_CALL(gemm_normal.run(), return false); // Wait for completion CHECK_CUDA_CALL(cudaDeviceSynchronize(), return false); // // Verify correctness // if (options.reference_check) { if (validate(gemm_normal)) { std::cout << "\nPassed verification\n" << std::endl; } else { std::cerr << "\n*** Error - problem failed the QA check ***\n" << std::endl; return false; } } // Warm-up run of the normal GEMM object CHECK_CUTLASS_CALL(gemm_normal.run(), return false); // Construct events cudaEvent_t events[2]; for (auto & event : events) { CHECK_CUDA_CALL(cudaEventCreate(&event), return false); } // Record an event at the start of a series of GEMM operations CHECK_CUDA_CALL(cudaEventRecord(events[0]), return false); // Run profiling loop for (int iter = 0; iter < options.iterations; ++iter) { gemm_normal(); } // Record an event when the GEMM operations have been launched. CHECK_CUDA_CALL(cudaEventRecord(events[1]), return false); // Wait for work on the device to complete. 
CHECK_CUDA_CALL(cudaEventSynchronize(events[1]), return false); // Measure elapsed runtime float runtime_total_ms = 0; CHECK_CUDA_CALL(cudaEventElapsedTime(&runtime_total_ms, events[0], events[1]), return false); // Compute average runtime and GFLOPs. double runtime_avg_ms = double(runtime_total_ms) / double(options.iterations); double gflops = options.gflops(runtime_avg_ms / 1000.0, kBatched); // Cleanup for (auto event : events) { CHECK_CUDA_CALL(cudaEventDestroy(event), return false); } std::cout << " Runtime: " << runtime_avg_ms << " ms\n" " GFLOPs: " << gflops << std::endl; return true; } }; /// Shorthand alist for GEMM instantiations template<typename LayoutA, typename PermuteALayout, typename LayoutB, typename PermuteBLayout, typename LayoutC, typename PermuteDLayout> using GemmPermute = cutlass::gemm::device::GemmUniversal< ElementInput, LayoutA, ElementInput, LayoutB, ElementOutput, LayoutC, ElementAccumulator, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm80, cutlass::gemm::GemmShape<128, 128, 32>, cutlass::gemm::GemmShape<64, 64, 32>, cutlass::gemm::GemmShape<16, 8, 16>, cutlass::epilogue::thread::LinearCombination< ElementOutput, AlignmentC, //128 / cutlass::sizeof_bits<ElementOutput>::value, ElementAccumulator, ElementAccumulator >, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>, 4, /*kStages*/ AlignmentA, /*AlignmentA*/ AlignmentB, /*AlignmentB*/ cutlass::arch::OpMultiplyAdd, cutlass::ComplexTransform::kNone, cutlass::ComplexTransform::kNone, false, /*GatherA*/ false, /*GatherB*/ false, /*ScatterD*/ PermuteDLayout, /*PermuteDLayout*/ typename cutlass::layout::InversePermute<PermuteALayout>::type, /*PermuteALayout*/ typename cutlass::layout::InversePermute<PermuteBLayout>::type /*PermuteBLayout*/ >; /////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char const **args) { // // This example uses mma.sync to directly access Tensor Cores to achieve peak performance. // cudaDeviceProp props; CHECK_CUDA_CALL(cudaGetDeviceProperties(&props, 0), return EXIT_FAILURE); if (__CUDACC_VER_MAJOR__ < 11 || props.major < 8) { // // This example requires an NVIDIA Ampere-architecture GPU. // std::cout << "CUTLASS's GEMM+Permute example requires a GPU of NVIDIA's Ampere Architecture " "or later (compute capability 80 or greater).\n"; return EXIT_SUCCESS; } // // Parse options // Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return EXIT_SUCCESS; } if (options.error) { std::cerr << "Aborting execution." 
<< std::endl; return EXIT_FAILURE; } // // Define GEMM types to test // // // TTT (Row-major) GEMMs // using TTTGemmNormalPermuteNone = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::NoPermute >; using TTTGemmNormalPermuteA = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>, cutlass::layout::RowMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::NoPermute >; using TTTGemmNormalPermuteAD = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>, cutlass::layout::RowMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3> >; using TTTGemmNormalPermuteB = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>, cutlass::layout::RowMajor, cutlass::layout::NoPermute >; using TTTGemmNormalPermuteBD = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>, cutlass::layout::RowMajor, cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3> >; using TTTGemmNormalPermuteD = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3> >; using TTTGemmNormalPermuteAB = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>, cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>, cutlass::layout::RowMajor, cutlass::layout::NoPermute >; using TTTGemmNormalPermuteABD = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>, cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>, cutlass::layout::RowMajor, cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3> >; // // NNN (Col-major) GEMMs // using NNNGemmNormalPermuteNone = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, cutlass::layout::ColumnMajor, cutlass::layout::NoPermute >; using NNNGemmNormalPermuteA = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>, cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, cutlass::layout::ColumnMajor, cutlass::layout::NoPermute >; using NNNGemmNormalPermuteAD = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>, cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3> >; using NNNGemmNormalPermuteB = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213ColumnMajor<S1, S2>, cutlass::layout::ColumnMajor, cutlass::layout::NoPermute >; using NNNGemmNormalPermuteBD = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213ColumnMajor<S1, S2>, cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3> >; using NNNGemmNormalPermuteD = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, 
cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3> >; using NNNGemmNormalPermuteAB = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>, cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213ColumnMajor<S1, S2>, cutlass::layout::ColumnMajor, cutlass::layout::NoPermute >; using NNNGemmNormalPermuteABD = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3>, cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213ColumnMajor<S1, S2>, cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3> >; // // NNT (Col-major inputs, row-major output) GEMMs // using NNTGemmNormalPermuteNone = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::NoPermute >; using NNTGemmNormalPermuteA = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>, cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::NoPermute >; using NNTGemmNormalPermuteAD = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>, cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3> >; using NNTGemmNormalPermuteB = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213ColumnMajor<S1, S2>, cutlass::layout::RowMajor, cutlass::layout::NoPermute >; using NNTGemmNormalPermuteBD = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213ColumnMajor<S1, S2>, cutlass::layout::RowMajor, cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3> >; using NNTGemmNormalPermuteD = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3> >; using NNTGemmNormalPermuteAB = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>, cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213ColumnMajor<S1, S2>, cutlass::layout::RowMajor, cutlass::layout::NoPermute >; using NNTGemmNormalPermuteABD = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>, cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermute0213ColumnMajor<S1, S2>, cutlass::layout::RowMajor, cutlass::layout::Tensor5DPermute20314RowMajor<T1, T2, T3> >; // // TTN (Row-major inputs, col-major output) GEMMs // using TTNGemmNormalPermuteNone = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::NoPermute, cutlass::layout::ColumnMajor, cutlass::layout::NoPermute >; using TTNGemmNormalPermuteA = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>, cutlass::layout::RowMajor, cutlass::layout::NoPermute, cutlass::layout::ColumnMajor, cutlass::layout::NoPermute >; using TTNGemmNormalPermuteAD = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>, cutlass::layout::RowMajor, 
cutlass::layout::NoPermute, cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3> >; using TTNGemmNormalPermuteB = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>, cutlass::layout::ColumnMajor, cutlass::layout::NoPermute >; using TTNGemmNormalPermuteBD = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>, cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3> >; using TTNGemmNormalPermuteD = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::NoPermute, cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3> >; using TTNGemmNormalPermuteAB = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>, cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>, cutlass::layout::ColumnMajor, cutlass::layout::NoPermute >; using TTNGemmNormalPermuteABD = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>, cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermute0213RowMajor<S1, S2>, cutlass::layout::ColumnMajor, cutlass::layout::Tensor5DPermute02413ColumnMajor<T1, T2, T3> >; // // TTT (Row-major) BMMs // using TTTGemmBatchedPermuteA = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>, cutlass::layout::RowMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::NoPermute >; using TTTGemmBatchedPermuteAD = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>, cutlass::layout::RowMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1> >; using TTTGemmBatchedPermuteB = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>, cutlass::layout::RowMajor, cutlass::layout::NoPermute >; using TTTGemmBatchedPermuteBD = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>, cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1> >; using TTTGemmBatchedPermuteD = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1> >; using TTTGemmBatchedPermuteAB = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::NoPermute, cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>, cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1> >; using TTTGemmBatchedPermuteABD = GemmPermute< cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>, cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1>, cutlass::layout::RowMajor, cutlass::layout::Tensor4DPermuteBMM0213RowMajor<D1> >; // // NNN (Col-major) BMMs // using NNNGemmBatchedPermuteA = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>, cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, cutlass::layout::ColumnMajor, cutlass::layout::NoPermute >; using 
NNNGemmBatchedPermuteAD = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>, cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1> >; using NNNGemmBatchedPermuteB = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>, cutlass::layout::ColumnMajor, cutlass::layout::NoPermute >; using NNNGemmBatchedPermuteBD = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>, cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1> >; using NNNGemmBatchedPermuteD = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, cutlass::layout::ColumnMajor, cutlass::layout::NoPermute, cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1> >; using NNNGemmBatchedPermuteAB = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>, cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>, cutlass::layout::ColumnMajor, cutlass::layout::NoPermute >; using NNNGemmBatchedPermuteABD = GemmPermute< cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>, cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1>, cutlass::layout::ColumnMajor, cutlass::layout::Tensor4DPermuteBMM0321ColumnMajor<D1> >; // // Profile it // Testbed<ElementInput, ElementInput, ElementOutput> testbed(options); bool result = true; result &= testbed.profile_GEMM_permute<TTTGemmNormalPermuteNone>(); result &= testbed.profile_GEMM_permute<TTTGemmNormalPermuteA>(); result &= testbed.profile_GEMM_permute<TTTGemmNormalPermuteAD>(); result &= testbed.profile_GEMM_permute<TTTGemmNormalPermuteB>(); result &= testbed.profile_GEMM_permute<TTTGemmNormalPermuteBD>(); result &= testbed.profile_GEMM_permute<TTTGemmNormalPermuteD>(); result &= testbed.profile_GEMM_permute<TTTGemmNormalPermuteAB>(); result &= testbed.profile_GEMM_permute<TTTGemmNormalPermuteABD>(); result &= testbed.profile_GEMM_permute<NNNGemmNormalPermuteNone>(); result &= testbed.profile_GEMM_permute<NNNGemmNormalPermuteA>(); result &= testbed.profile_GEMM_permute<NNNGemmNormalPermuteAD>(); result &= testbed.profile_GEMM_permute<NNNGemmNormalPermuteB>(); result &= testbed.profile_GEMM_permute<NNNGemmNormalPermuteBD>(); result &= testbed.profile_GEMM_permute<NNNGemmNormalPermuteD>(); result &= testbed.profile_GEMM_permute<NNNGemmNormalPermuteAB>(); result &= testbed.profile_GEMM_permute<NNNGemmNormalPermuteABD>(); result &= testbed.profile_GEMM_permute<NNTGemmNormalPermuteNone>(); result &= testbed.profile_GEMM_permute<NNTGemmNormalPermuteA>(); result &= testbed.profile_GEMM_permute<NNTGemmNormalPermuteAD>(); result &= testbed.profile_GEMM_permute<NNTGemmNormalPermuteB>(); result &= testbed.profile_GEMM_permute<NNTGemmNormalPermuteBD>(); result &= testbed.profile_GEMM_permute<NNTGemmNormalPermuteD>(); result &= testbed.profile_GEMM_permute<NNTGemmNormalPermuteAB>(); result &= testbed.profile_GEMM_permute<NNTGemmNormalPermuteABD>(); result &= testbed.profile_GEMM_permute<TTNGemmNormalPermuteNone>(); result &= testbed.profile_GEMM_permute<TTNGemmNormalPermuteA>(); result &= testbed.profile_GEMM_permute<TTNGemmNormalPermuteAD>(); result &= 
testbed.profile_GEMM_permute<TTNGemmNormalPermuteB>(); result &= testbed.profile_GEMM_permute<TTNGemmNormalPermuteBD>(); result &= testbed.profile_GEMM_permute<TTNGemmNormalPermuteD>(); result &= testbed.profile_GEMM_permute<TTNGemmNormalPermuteAB>(); result &= testbed.profile_GEMM_permute<TTNGemmNormalPermuteABD>(); result &= testbed.profile_GEMM_permute<TTTGemmBatchedPermuteA>(); result &= testbed.profile_GEMM_permute<TTTGemmBatchedPermuteAD>(); result &= testbed.profile_GEMM_permute<TTTGemmBatchedPermuteB>(); result &= testbed.profile_GEMM_permute<TTTGemmBatchedPermuteBD>(); result &= testbed.profile_GEMM_permute<TTTGemmBatchedPermuteD>(); result &= testbed.profile_GEMM_permute<TTTGemmBatchedPermuteAB>(); result &= testbed.profile_GEMM_permute<TTTGemmBatchedPermuteABD>(); result &= testbed.profile_GEMM_permute<NNNGemmBatchedPermuteA>(); result &= testbed.profile_GEMM_permute<NNNGemmBatchedPermuteAD>(); result &= testbed.profile_GEMM_permute<NNNGemmBatchedPermuteB>(); result &= testbed.profile_GEMM_permute<NNNGemmBatchedPermuteBD>(); result &= testbed.profile_GEMM_permute<NNNGemmBatchedPermuteD>(); result &= testbed.profile_GEMM_permute<NNNGemmBatchedPermuteAB>(); result &= testbed.profile_GEMM_permute<NNNGemmBatchedPermuteABD>(); std::cout << "\n" "====================================================\n" "Finished (" << (result ? "PASS" : "FAIL") << ")\n" "====================================================" << std::endl; return result ? EXIT_SUCCESS : EXIT_FAILURE; } /////////////////////////////////////////////////////////////////////////////////////////////////
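///////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch: the index arithmetic behind a Tensor4DPermute0213-style layout, written out
// on the host. A row-major [M, N] matrix is viewed as [M/S1, S1, S2, N/S2], its two middle
// dimensions are swapped (permute [0, 2, 1, 3]), and the result is read back as a row-major matrix
// of shape [M*S2/S1, N*S1/S2]. This assumes M % S1 == 0 and N % S2 == 0. It is meant only to make
// the reshape/permute bookkeeping concrete; the CUTLASS layout classes in cutlass/layout/permute.h
// fold this math into stride computations inside the epilogue rather than materializing
// coordinates like this.

#include <utility>

// Maps element (i, j) of the original [M, N] matrix to its coordinates in the
// permuted [M*S2/S1, N*S1/S2] view.
inline std::pair<int, int> permute_0213_coord(int i, int j, int N, int S1, int S2) {
  int d0 = i / S1;            // index into the M/S1 dimension
  int d1 = i % S1;            // index into the S1 dimension
  int d2 = j / (N / S2);      // index into the S2 dimension
  int d3 = j % (N / S2);      // index into the N/S2 dimension
  // After permute([0, 2, 1, 3]) the logical shape is [M/S1, S2, S1, N/S2]; flattening the first
  // two and the last two dimensions gives the output matrix coordinates.
  int row = d0 * S2 + d2;
  int col = d1 * (N / S2) + d3;
  return {row, col};
}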
cutlass/examples/39_gemm_permute/gemm_permute.cu/0
{ "file_path": "cutlass/examples/39_gemm_permute/gemm_permute.cu", "repo_id": "cutlass", "token_count": 19068 }
8
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. The epilogue rearranges the result of a matrix product through shared memory to match canonical tensor layouts in global memory. Epilogues support conversion and reduction operations. This is a copy of cutlass/epilogue/threadblock/epilogue.h that can handle "row_id" as a first argument, as uses it to get the corresponding `m_prime` / `s_prime` to rescale the output. */ #pragma once #if defined(__CUDACC_RTC__) #include <cuda/std/cassert> #else #include <assert.h> #endif #include "cutlass/aligned_buffer.h" #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/functional.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/vector.h" #include "cutlass/numeric_types.h" #include "cutlass/tensor_coord.h" #include "cutlass/gemm/gemm.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/regular_tile_iterator.h" #include "cutlass/epilogue/threadblock/epilogue_base.h" #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" #include "cutlass/numeric_types.h" #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/epilogue/thread/scale_type.h" #include "cutlass/functional.h" #include "cutlass/numeric_conversion.h" #include "cutlass/numeric_types.h" #include "epilogue_pipelined.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace thread { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Applies a linear combination operator to an array of elements. 
// output <- alpha * accumulator + beta * source // with: // alpha = 1 / s_prime (to normalize when isLast=True, 1 otherwise) // beta = alpha / m_prime (renormalize the output when the max changes) // source is the current output template < typename ElementOutput_, ///< Data type used to store tensors typename ElementSource_, //< Data type for source (usually matches //`ElementOutput`) int Count, ///< Number of elements computed per operation. ///< Usually it is 128/sizeof_bits<ElementOutput_>, ///< but we use 64 or 32 sometimes when there are not enough data ///< to store typename ElementAccumulator_, ///< Accumulator data type typename ElementCompute_, ///< Data type used to compute linear combination bool isFirst, bool isLast, typename FragmentAlphaBeta_, FloatRoundStyle Round = FloatRoundStyle::round_to_nearest> class MemoryEfficientAttentionNormalize { public: using ElementOutput = ElementOutput_; using ElementSource = ElementSource_; using ElementAccumulator = ElementAccumulator_; using ElementCompute = ElementCompute_; static int const kCount = Count; using FragmentOutput = Array<ElementOutput, kCount>; using FragmentSource = Array<ElementSource, kCount>; using FragmentAccumulator = Array<ElementAccumulator, kCount>; using ComputeFragment = Array<ElementCompute, kCount>; using FragmentAlphaBeta = FragmentAlphaBeta_; static FloatRoundStyle const kRound = Round; private: // // Data members // FragmentAlphaBeta const& s_prime_; FragmentAlphaBeta const& m_prime_; public: /// Constructs the function object, possibly loading from pointers in host /// memory CUTLASS_HOST_DEVICE MemoryEfficientAttentionNormalize( FragmentAlphaBeta const& s_prime, FragmentAlphaBeta const& m_prime) : s_prime_(s_prime), m_prime_(m_prime) {} /// Returns true if source is needed CUTLASS_HOST_DEVICE bool is_source_needed() const { return !isFirst; } /// Functionally required for serial reduction in the epilogue CUTLASS_HOST_DEVICE void set_k_partition(int k_partition, int k_partition_count) {} /// Computes linear scaling: D = alpha * accumulator + beta * source CUTLASS_HOST_DEVICE FragmentOutput operator()( int row, FragmentAccumulator const& accumulator, FragmentSource const& source) const { assert(!isFirst); // Convert source to interal compute numeric type NumericArrayConverter<ElementCompute, ElementSource, kCount, Round> source_converter; NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round> accumulator_converter; // Convert to destination numeric type NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter; ComputeFragment converted_source = source_converter(source); ComputeFragment converted_accumulator = accumulator_converter(accumulator); // Perform binary operations ComputeFragment intermediate; multiplies<ComputeFragment> mul_add_source; multiply_add<ComputeFragment> mul_add_accumulator; ElementCompute alpha = isLast ? 
        (1 / s_prime_[row]) : 1;
    ElementCompute beta = alpha * m_prime_[row];

    intermediate = mul_add_source(beta, converted_source); // X = beta * C

    intermediate = mul_add_accumulator(
        alpha, converted_accumulator, intermediate); // D = alpha * Accum + X

    return destination_converter(intermediate);
  }

  /// Computes linear scaling: D = alpha * accumulator
  CUTLASS_HOST_DEVICE
  FragmentOutput operator()(int row, FragmentAccumulator const& accumulator)
      const {
    assert(isFirst);

    // Convert accumulator to internal compute numeric type
    NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round>
        accumulator_converter;

    // Convert to destination numeric type
    NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round>
        destination_converter;

    ComputeFragment converted_accumulator = accumulator_converter(accumulator);

    ComputeFragment intermediate;
    multiplies<ComputeFragment> mul_accumulator;

    ElementCompute alpha = isLast ? (1 / s_prime_[row]) : 1;

    intermediate = mul_accumulator(
        alpha, converted_accumulator); // X = alpha * Accum

    return destination_converter(intermediate);
  }
};

} // namespace thread

namespace threadblock {
template <
    typename EO,
    typename ES,
    int Count,
    typename EA,
    typename EC,
    bool F,
    bool L,
    typename FAB,
    FloatRoundStyle R>
struct ApplyEpilogueOp<thread::MemoryEfficientAttentionNormalize<
    EO,
    ES,
    Count,
    EA,
    EC,
    F,
    L,
    FAB,
    R>> {
  using Op = thread::
      MemoryEfficientAttentionNormalize<EO, ES, Count, EA, EC, F, L, FAB, R>;

  static CUTLASS_DEVICE typename Op::FragmentOutput apply(
      Op const& output_op,
      int row_id,
      typename Op::FragmentAccumulator const& accum,
      typename Op::FragmentSource const& source) {
    return output_op(row_id, accum, source);
  }

  static CUTLASS_DEVICE typename Op::FragmentOutput apply(
      Op const& output_op,
      int row_id,
      typename Op::FragmentAccumulator const& accum) {
    return output_op(row_id, accum);
  }
};

/////////////////////////////////////////////////////////////////////////////////////////////////

} // namespace threadblock
} // namespace epilogue
} // namespace cutlass

/////////////////////////////////////////////////////////////////////////////////////////////////
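The fragment-wise functor above is easier to follow as scalar arithmetic. The sketch below is not part of the file; the function name and the plain-float arguments are hypothetical stand-ins for the cutlass::Array fragments and element-type conversions the real operator() performs, but the alpha/beta logic mirrors the device code.

// Scalar sketch (assumed helper, for illustration only) of the per-row rescaling.
float normalize_output(bool is_first, bool is_last,
                       float accum,      // partial attention output for this row
                       float source,     // previously written output for this row
                       float s_prime,    // running softmax denominator for this row
                       float m_prime) {  // correction factor applied when the row max changed
  float alpha = is_last ? (1.0f / s_prime) : 1.0f;   // normalize only on the last partial
  if (is_first) {
    return alpha * accum;                            // no prior output to rescale
  }
  float beta = alpha * m_prime;                      // matches beta = alpha * m_prime_[row] above
  return alpha * accum + beta * source;              // D = alpha * Accum + beta * C
}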
cutlass/examples/41_fused_multi_head_attention/epilogue/epilogue_rescale_output.h/0
{ "file_path": "cutlass/examples/41_fused_multi_head_attention/epilogue/epilogue_rescale_output.h", "repo_id": "cutlass", "token_count": 2907 }
9
/***************************************************************************************************
 * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/
/*! \file
  \brief Instantiates the right WarpIterator to read from shared memory.

  The class `DefaultWarpIteratorAFromSharedMemory` is useful when reading data
  dumped with `B2bGemm::accumToSmem`.
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h" #include "cutlass/platform/platform.h" #include "warp_iterator_from_smem.h" namespace cutlass { namespace gemm { namespace threadblock { template < typename WarpShape, typename InstructionShape, typename RegularWarpIterator, typename Policy, typename Enable = void> struct DefaultWarpIteratorAFromSharedMemory {}; // TensorOp - Ampere half template <typename RegularWarpIterator, typename Policy, int kInstrK> struct DefaultWarpIteratorAFromSharedMemory< cutlass::gemm::GemmShape<32, 32, 32>, cutlass::gemm::GemmShape<16, 8, kInstrK>, RegularWarpIterator, Policy, typename platform::enable_if<( sizeof_bits<typename RegularWarpIterator::Element>::value == 16 && Policy::Operator::Policy::OpDelta::kRow == 1)>::type> { using OpDelta = typename Policy::Operator::Policy::OpDelta; using WarpShape = cutlass::MatrixShape<32, 32>; using InstructionShape = cutlass::gemm::GemmShape<16, 8, kInstrK>; using WarpIterator = cutlass::gemm::warp::WarpIteratorFromSmem< cutlass::gemm::Operand::kA, typename RegularWarpIterator::Element, cutlass::MatrixShape<InstructionShape::kM, InstructionShape::kK>>; }; // TensorOp - Ampere f32 template <typename WarpShape, typename RegularWarpIterator, typename Policy> struct DefaultWarpIteratorAFromSharedMemory< WarpShape, cutlass::gemm::GemmShape<16, 8, 8>, RegularWarpIterator, Policy, typename platform::enable_if<( sizeof_bits<typename RegularWarpIterator::Element>::value != 16 || Policy::Operator::Policy::OpDelta::kRow != 1)>::type> { using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; static constexpr auto kWarpSize = 32; using OpDelta = typename Policy::Operator::Policy::OpDelta; using WarpIterator = cutlass::gemm::warp::MmaTensorOpMultiplicandTileAccessIterator< cutlass::MatrixShape<WarpShape::kM, WarpShape::kK>, cutlass::gemm::Operand::kA, typename RegularWarpIterator::Element, cutlass::layout::RowMajor, cutlass::MatrixShape<InstructionShape::kM, InstructionShape::kK>, OpDelta::kRow, kWarpSize>; }; // TensorOp - Volta template <typename WarpShape, typename RegularWarpIterator, typename Policy> struct DefaultWarpIteratorAFromSharedMemory< WarpShape, cutlass::gemm::GemmShape<16, 16, 4>, RegularWarpIterator, Policy> { using InstructionShape = cutlass::gemm::GemmShape<16, 16, 4>; static constexpr auto kWarpSize = 32; using OpDelta = typename Policy::Operator::Policy::OpDelta; using WarpIterator = cutlass::gemm::warp::MmaVoltaTensorOpMultiplicandTileIterator< cutlass::MatrixShape<32, 32>, // MatrixShape<WarpShape::kM, // WarpShape::kK>, cutlass::gemm::Operand::kA, typename RegularWarpIterator::Element, cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise<16, 32>, cutlass::MatrixShape<16, 4>, OpDelta::kRow, kWarpSize>; }; // Simt template <typename WarpShape, typename RegularWarpIterator, typename Policy> struct DefaultWarpIteratorAFromSharedMemory< WarpShape, cutlass::gemm::GemmShape<1, 1, 1>, RegularWarpIterator, Policy> { using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; static constexpr auto kWarpSize = 32; // We just use the same iterator, as we reproduced the same shared-memory // schema. Just modify it to handle non-complete tiles. using WarpIterator = RegularWarpIterator; }; } // namespace threadblock } // namespace gemm } // namespace cutlass
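To make the selection mechanism concrete, here is a compile-time sketch (not from the file above) showing that the Simt specialization simply forwards the RegularWarpIterator it was given. ToyWarpIterator and ToyPolicy are hypothetical placeholders, and the snippet assumes this header and its CUTLASS dependencies are on the include path; the GemmShape<1,1,1> instruction shape is what routes the instantiation to the Simt case.

// Assumes the header above is reachable as shown; adjust the include to your tree.
#include "default_warp_iterator_from_smem.h"
#include <type_traits>

struct ToyWarpIterator {};   // hypothetical stand-in for a regular warp iterator
struct ToyPolicy {};         // hypothetical; the Simt specialization never inspects it

using SelectedIterator = cutlass::gemm::threadblock::DefaultWarpIteratorAFromSharedMemory<
    cutlass::gemm::GemmShape<32, 64, 8>,   // WarpShape (unused by the Simt path)
    cutlass::gemm::GemmShape<1, 1, 1>,     // InstructionShape: selects the Simt case
    ToyWarpIterator,
    ToyPolicy>::WarpIterator;

static_assert(std::is_same<SelectedIterator, ToyWarpIterator>::value,
              "The Simt path reuses the threadblock's regular warp iterator");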
cutlass/examples/41_fused_multi_head_attention/iterators/default_warp_iterator_from_smem.h/0
{ "file_path": "cutlass/examples/41_fused_multi_head_attention/iterators/default_warp_iterator_from_smem.h", "repo_id": "cutlass", "token_count": 1962 }
10
{ "0": { "A_tp": "fp16", "B_tp": "fp16", "C_tp": "fp16", "Acc_tp": "fp16", "A_format": "Row", "B_format": "Col", "C_format": "Row", "mnk": [15000, 256, 32], "epilogue": { "tp": "LeakyRelu", "bias": {"addbias": false, "bias_tp": "mat"}, "args": [["float", "leaky_alpha", 1.3]] } }, "1": { "A_tp": "fp16", "B_tp": "fp16", "C_tp": "fp16", "Acc_tp": "fp16", "A_format": "Row", "B_format": "Col", "C_format": "Row", "mnk": [15000, 128, 256], "epilogue": { "tp": "LeakyRelu", "bias": {"addbias": false, "bias_tp": "mat"}, "args": [["float", "leaky_alpha", 1.3]] } }, "2": { "A_tp": "fp16", "B_tp": "fp16", "C_tp": "fp16", "Acc_tp": "fp16", "A_format": "Row", "B_format": "Col", "C_format": "Row", "mnk": [15000, 64, 128], "epilogue": { "tp": "LeakyRelu", "bias": {"addbias": false, "bias_tp": "mat"}, "args": [["float", "leaky_alpha", 1.3]] } } }
cutlass/examples/44_multi_gemm_ir_and_codegen/config.json/0
{ "file_path": "cutlass/examples/44_multi_gemm_ir_and_codegen/config.json", "repo_id": "cutlass", "token_count": 658 }
11
################################################################################################# # # Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################################# import helper import gen_ir as ir import gen_turing_and_volta as gen_basic class gen_verify: def __init__(self, fuse_gemm_info, gen_class_name, user_header_file, output_dir = "../"): self.fuse_gemm_info = fuse_gemm_info self.name = gen_class_name + "_verify" self.b2b_num = len(fuse_gemm_info) self.params = [] self.user_header_file = "" for header in user_header_file: self.user_header_file += "#include \"" + header + "\"\n" self.separate_cutlass = gen_basic.gen_volta_turing_fuse_act_impl(fuse_gemm_info, gen_class_name, user_header_file, output_dir) self.gen_params() self.output_dir = output_dir def gen_code(self): code = "" code += self.user_header_file code += self.separate_cutlass.gen_using(False) #False -> Turing, True -> Volta code_body = "" for i in range(self.b2b_num): code_body += " " + helper.var_idx("Gemm", i) + helper.var_idx(" gemm_op_", i) + ";\n" code_body += " " + helper.var_idx("gemm_op_", i) + helper.var_idx(".initialize(Arguments_", i) + ", nullptr);\n" code_body += self.separate_cutlass.gen_run() code += ir.gen_func(self.name, self.params, code_body) helper.write_2_headfile("cutlass_verify.h", self.output_dir, code) def gen_params(self): for i in range(self.b2b_num): self.params.append( ( helper.var_idx("typename Gemm", i)+ "::Arguments", helper.var_idx("Arguments_", i) ) ) def get_params(self, declartion = True): code = "" if declartion: for param in self.params: code += param[0] + " " + param[1] + ";\n" return code def gen_initialize(): code = "" initialize_code = self.separate_cutlass.gen_initialize() code = ir.gen_func("initialize", [[]])
cutlass/examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_verify.py/0
{ "file_path": "cutlass/examples/44_multi_gemm_ir_and_codegen/ir_gen/gen_verify.py", "repo_id": "cutlass", "token_count": 1431 }
12
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Template for a double-buffered threadblock-scoped GEMM kernel. */ #pragma once #include "cutlass/aligned_buffer.h" #include "cutlass/arch/memory.h" #include "cutlass/array.h" #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/gemm/threadblock/mma_base.h" #include "dual_mma_base.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace gemm { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Structure to compute the matrix product targeting CUDA cores and SIMT math /// instructions. 
template < /// Size of the Gemm problem - concept: gemm::GemmShape<> typename Shape_, /// Iterates over tiles of A operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorA_, /// Iterates over tiles of A operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorA_, /// Cache operation for operand A cutlass::arch::CacheOperation::Kind CacheOpA, /// Iterates over tiles of B0 operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB0_, /// Iterates over tiles of B0 operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB0_, /// Cache operation for operand B cutlass::arch::CacheOperation::Kind CacheOpB, /// Iterates over tiles of B1 operand in global memory // (concept: ReadableTileIterator | ForwardTileIterator | // MaskedTileIterator) typename IteratorB1_, /// Iterates over tiles of B1 operand in shared memory /// (concept: WriteableTileIterator | RandomAccessTileIterator) typename SmemIteratorB1_, /// Data type of accumulator matrix typename ElementC_, /// Data type of accumulator matrix typename LayoutC_, /// Policy describing tuning details (concept: MmaPolicy) typename Policy0_, /// B1-specific version of the policy (concept: MmaPolicy) typename Policy1_, /// Number of stages, int Stages, /// Use zfill or predicate for out-of-bound cp.async SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone, /// Used for partial specialization typename Enable = bool> class DualMmaMultistage : public DualMmaBase<Shape_, Policy0_, Policy1_, Stages> { public: ///< Base class using Base = DualMmaBase<Shape_, Policy0_, Policy1_, Stages>; ///< Size of the Gemm problem - concept: gemm::GemmShape<> using Shape = Shape_; ///< Iterates over tiles of A operand in global memory using IteratorA = IteratorA_; ///< Iterates over tiles of B0 operand in global memory using IteratorB0 = IteratorB0_; ///< Iterates over tiles of B1 operand in global memory using IteratorB1 = IteratorB1_; ///< Data type of accumulator matrix using ElementC = ElementC_; ///< Layout of accumulator matrix using LayoutC = LayoutC_; ///< Policy describing tuning details using Policy0 = Policy0_; using Policy1 = Policy1_; using SmemIteratorA = SmemIteratorA_; using SmemIteratorB0 = SmemIteratorB0_; using SmemIteratorB1 = SmemIteratorB1_; static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; // // Dependent types // /// Fragment of accumulator tile using FragmentC = typename Policy0::Operator::FragmentC; /// Warp-level Mma using Operator0 = typename Policy0::Operator; using Operator1 = typename Policy1::Operator; /// Minimum architecture is Sm80 to support cp.async using ArchTag = arch::Sm80; /// Complex transform on A operand static ComplexTransform const kTransformA = Operator0::kTransformA; /// Complex transform on B operand static ComplexTransform const kTransformB0 = Operator0::kTransformB; static ComplexTransform const kTransformB1 = Operator1::kTransformB; /// Internal structure exposed for introspection. 
struct Detail { /// Number of cp.async instructions to load one stage of operand A static int const AsyncCopyIterationsPerStageA = IteratorA::ThreadMap::Iterations::kCount; /// Number of cp.async instructions to load one stage of operand B static int const AsyncCopyIterationsPerStageB = IteratorB0::ThreadMap::Iterations::kCount; /// Number of stages static int const kStages = Stages; /// Number of cp.async instructions to load on group of operand A static int const kAccessesPerGroupA = (AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; /// Number of cp.async instructions to load on group of operand B static int const kAccessesPerGroupB = (AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / Base::kWarpGemmIterations; }; private: using WarpLoadedFragmentA = typename Operator0::FragmentA; using WarpLoadedFragmentB0 = typename Operator0::FragmentB; using WarpLoadedFragmentB1 = typename Operator1::FragmentB; using WarpTransformedFragmentA = typename Operator0::TransformedFragmentA; using WarpTransformedFragmentB0 = typename Operator0::TransformedFragmentB; using WarpTransformedFragmentB1 = typename Operator1::TransformedFragmentB; private: // // Data members // /// Iterator to write threadblock-scoped tile of A operand to shared memory SmemIteratorA smem_iterator_A_; /// Iterator to write threadblock-scoped tile of B operand to shared memory SmemIteratorB0 smem_iterator_B0_; SmemIteratorB1 smem_iterator_B1_; public: /// Construct from tensor references CUTLASS_DEVICE DualMmaMultistage( ///< Shared storage needed for internal use by threadblock-scoped GEMM typename Base::SharedStorage &shared_storage, ///< ID within the threadblock int thread_idx, ///< ID of warp int warp_idx, ///< ID of each thread within a warp int lane_idx ): Base(shared_storage, thread_idx, warp_idx, lane_idx), smem_iterator_A_(shared_storage.operand_A_ref(), thread_idx), smem_iterator_B0_(shared_storage.operand_B0_ref(), thread_idx), smem_iterator_B1_(shared_storage.operand_B1_ref(), thread_idx) { // Compute warp location within threadblock tile by mapping the warp_id to // three coordinates: // _m: the warp's position within the threadblock along the M dimension // _n: the warp's position within the threadblock along the N dimension // _k: the warp's position within the threadblock along the K dimension int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; // Add per-warp offsets in units of warp-level tiles this->warp_tile_iterator_A_.add_tile_offset( {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); this->warp_tile_iterator_B0_.add_tile_offset( {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); this->warp_tile_iterator_B1_.add_tile_offset( {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); } CUTLASS_DEVICE void copy_tiles_and_advance(IteratorA &iterator_A, IteratorB0 &iterator_B0, IteratorB1 &iterator_B1, int group_start_A = 0, int group_start_B = 0) { iterator_A.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector); this->smem_iterator_A_.set_iteration_index(group_start_A); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) { if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) { typename IteratorA::AccessType *dst_ptr = reinterpret_cast<typename IteratorA::AccessType *>( 
this->smem_iterator_A_.get()); int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { auto gmem_ptr = iterator_A.get(); if (SharedMemoryClear == SharedMemoryClearOption::kZfill) { cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>( dst_ptr + v, gmem_ptr, iterator_A.valid()); } else { cutlass::arch::cp_async<kSrcBytes, kCacheOpA>( dst_ptr + v, gmem_ptr, iterator_A.valid()); } ++iterator_A; } ++this->smem_iterator_A_; } } iterator_B0.set_iteration_index(group_start_B * IteratorB0::kAccessesPerVector); iterator_B1.set_iteration_index(group_start_B * IteratorB1::kAccessesPerVector); this->smem_iterator_B0_.set_iteration_index(group_start_B); this->smem_iterator_B1_.set_iteration_index(group_start_B); // Async Copy for operand B0 CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) { if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) { typename IteratorB0::AccessType *dst_ptr = reinterpret_cast<typename IteratorB0::AccessType *>( this->smem_iterator_B0_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB0::Element>::value * IteratorB0::ThreadMap::kElementsPerAccess / IteratorB0::kAccessesPerVector / 8; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB0::kAccessesPerVector; ++v) { auto gmem_ptr = iterator_B0.get(); if (SharedMemoryClear == SharedMemoryClearOption::kZfill) { cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>( dst_ptr + v, gmem_ptr, iterator_B0.valid()); } else { cutlass::arch::cp_async<kSrcBytes, kCacheOpB>( dst_ptr + v, gmem_ptr, iterator_B0.valid()); } ++iterator_B0; } ++this->smem_iterator_B0_; } } // Async Copy for operand B1 CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) { if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) { typename IteratorB1::AccessType *dst_ptr = reinterpret_cast<typename IteratorB1::AccessType *>( this->smem_iterator_B1_.get()); int const kSrcBytes = sizeof_bits<typename IteratorB1::Element>::value * IteratorB1::ThreadMap::kElementsPerAccess / IteratorB1::kAccessesPerVector / 8; CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) { auto gmem_ptr = iterator_B1.get(); if (SharedMemoryClear == SharedMemoryClearOption::kZfill) { cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>( dst_ptr + v, gmem_ptr, iterator_B1.valid()); } else { cutlass::arch::cp_async<kSrcBytes, kCacheOpB>( dst_ptr + v, gmem_ptr, iterator_B1.valid()); } ++iterator_B1; } ++this->smem_iterator_B1_; } } } /// Perform a threadblock-scoped matrix multiply-accumulate CUTLASS_DEVICE void operator()( ///< problem size of GEMM int gemm_k_iterations, ///< destination accumulator tile FragmentC &accum0, FragmentC &accum1, ///< iterator over A operand in global memory IteratorA iterator_A, ///< iterator over B operand in global memory IteratorB0 iterator_B0, IteratorB1 iterator_B1, ///< initial value of accumulator FragmentC const &src_accum0, FragmentC const &src_accum1 ) { // // Prologue // // Issue several complete stages CUTLASS_PRAGMA_UNROLL for (int stage = 0; stage < Base::kStages - 1; ++stage, --gemm_k_iterations) { iterator_A.clear_mask(gemm_k_iterations == 0); iterator_B0.clear_mask(gemm_k_iterations == 0); iterator_B1.clear_mask(gemm_k_iterations == 0); iterator_A.set_iteration_index(0); this->smem_iterator_A_.set_iteration_index(0); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < 
Detail::AsyncCopyIterationsPerStageA; ++j) { typename IteratorA::AccessType *dst_ptr = reinterpret_cast<typename IteratorA::AccessType *>( this->smem_iterator_A_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value * IteratorA::ThreadMap::kElementsPerAccess / IteratorA::kAccessesPerVector / 8; int src_bytes = (iterator_A.valid() ? kSrcBytes : 0); cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>( dst_ptr + v, iterator_A.get(), iterator_A.valid()); ++iterator_A; } ++this->smem_iterator_A_; } iterator_B0.set_iteration_index(0); iterator_B1.set_iteration_index(0); this->smem_iterator_B0_.set_iteration_index(0); this->smem_iterator_B1_.set_iteration_index(0); // Async Copy for operand B0 CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { typename IteratorB0::AccessType *dst_ptr = reinterpret_cast<typename IteratorB0::AccessType *>( this->smem_iterator_B0_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB0::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorB0::Element>::value * IteratorB0::ThreadMap::kElementsPerAccess / IteratorB0::kAccessesPerVector / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>( dst_ptr + v, iterator_B0.get(), iterator_B0.valid()); ++iterator_B0; } ++this->smem_iterator_B0_; } // Async Copy for operand B1 CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { typename IteratorB1::AccessType *dst_ptr = reinterpret_cast<typename IteratorB1::AccessType *>( this->smem_iterator_B1_.get()); CUTLASS_PRAGMA_UNROLL for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) { int const kSrcBytes = sizeof_bits<typename IteratorB1::Element>::value * IteratorB1::ThreadMap::kElementsPerAccess / IteratorB1::kAccessesPerVector / 8; cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>( dst_ptr + v, iterator_B1.get(), iterator_B1.valid()); ++iterator_B1; } ++this->smem_iterator_B1_; } // Move to the next stage iterator_A.add_tile_offset({0, 1}); iterator_B0.add_tile_offset({1, 0}); iterator_B1.add_tile_offset({1, 0}); this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_B0_.add_tile_offset({1, 0}); this->smem_iterator_B1_.add_tile_offset({1, 0}); // Defines the boundary of a stage of cp.async. cutlass::arch::cp_async_fence(); } // Perform accumulation in the 'd' output operand accum0 = src_accum0; accum1 = src_accum1; // // Clear the remaining tiles of SMEM. This is a functional requirement for some kernels // so that all accumulator elements outside the GEMM footprint are zero. 
// if (SharedMemoryClear == SharedMemoryClearOption::kClearLastStage) { /// Iterator to write threadblock-scoped tile of A operand to shared memory SmemIteratorA last_smem_iterator_A(this->smem_iterator_A_); typename IteratorA::AccessType zero_A; zero_A.clear(); last_smem_iterator_A.set_iteration_index(0); // Async Copy for operand A CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) { typename IteratorA::AccessType *dst_ptr = reinterpret_cast<typename IteratorA::AccessType *>( last_smem_iterator_A.get()); *dst_ptr = zero_A; ++last_smem_iterator_A; } typename IteratorB0::AccessType zero_B; zero_B.clear(); /// Iterator to write threadblock-scoped tile of B0 operand to shared memory SmemIteratorB0 last_smem_iterator_B0(this->smem_iterator_B0_); last_smem_iterator_B0.set_iteration_index(0); // Async Copy for operand B0 CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { typename IteratorB0::AccessType *dst_ptr = reinterpret_cast<typename IteratorB0::AccessType *>( last_smem_iterator_B0.get()); *dst_ptr = zero_B; ++last_smem_iterator_B0; } /// Iterator to write threadblock-scoped tile of B1 operand to shared memory SmemIteratorB1 last_smem_iterator_B1(this->smem_iterator_B1_); last_smem_iterator_B1.set_iteration_index(0); // Async Copy for operand B1 CUTLASS_PRAGMA_UNROLL for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { typename IteratorB1::AccessType *dst_ptr = reinterpret_cast<typename IteratorB1::AccessType *>( last_smem_iterator_B1.get()); *dst_ptr = zero_B; ++last_smem_iterator_B1; } } // Waits until stages up to the previous (kStages-2)th stage have committed. cutlass::arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Pair of fragments used to overlap shared memory loads and math // instructions WarpLoadedFragmentA warp_loaded_frag_A[2]; WarpLoadedFragmentB0 warp_loaded_frag_B0[2]; WarpLoadedFragmentB1 warp_loaded_frag_B1[2]; WarpTransformedFragmentA warp_transformed_frag_A[2]; WarpTransformedFragmentB0 warp_transformed_frag_B0[2]; WarpTransformedFragmentB1 warp_transformed_frag_B1[2]; Operator0 warp_mma0; Operator1 warp_mma1; this->warp_tile_iterator_A_.set_kgroup_index(0); this->warp_tile_iterator_B0_.set_kgroup_index(0); this->warp_tile_iterator_B1_.set_kgroup_index(0); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]); this->warp_tile_iterator_B0_.load(warp_loaded_frag_B0[0]); this->warp_tile_iterator_B1_.load(warp_loaded_frag_B1[0]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_B0_; ++this->warp_tile_iterator_B1_; iterator_A.clear_mask(gemm_k_iterations == 0); iterator_B0.clear_mask(gemm_k_iterations == 0); iterator_B1.clear_mask(gemm_k_iterations == 0); int smem_write_stage_idx = Base::kStages - 1; int smem_read_stage_idx = 0; warp_mma0.transform(warp_transformed_frag_A[0], warp_transformed_frag_B0[0], warp_loaded_frag_A[0], warp_loaded_frag_B0[0]); warp_mma1.transform(warp_transformed_frag_A[0], warp_transformed_frag_B1[0], warp_loaded_frag_A[0], warp_loaded_frag_B1[0]); // tf32x3 kernels use staging accumulation. warp_mma uses a temporary // accumulator and this temporary accumulator is added to the final // accumulator once in every mainloop iteration. 
plus<FragmentC> plus_accum; FragmentC tmp_accum0, tmp_accum1; if (platform::is_same<typename Operator0::MathOperator, arch::OpMultiplyAddFastF32>::value || platform::is_same<typename Operator0::MathOperator, arch::OpMultiplyAddComplexFastF32>::value) { tmp_accum0.clear(); tmp_accum1.clear(); } // // Mainloop // CUTLASS_GEMM_LOOP for (; gemm_k_iterations > (-Base::kStages + 1);) { // // Loop over GEMM K dimension // // Computes a warp-level GEMM on data held in shared memory // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate CUTLASS_PRAGMA_UNROLL for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) { // Load warp-level tiles from shared memory, wrapping to k offset if // this is the last group as the case may be. this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_B0_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_B1_.set_kgroup_index((warp_mma_k + 1) % Base::kWarpGemmIterations); this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]); this->warp_tile_iterator_B0_.load(warp_loaded_frag_B0[(warp_mma_k + 1) % 2]); this->warp_tile_iterator_B1_.load(warp_loaded_frag_B1[(warp_mma_k + 1) % 2]); ++this->warp_tile_iterator_A_; ++this->warp_tile_iterator_B0_; ++this->warp_tile_iterator_B1_; if (warp_mma_k > 0) { warp_mma0.transform(warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B0[warp_mma_k % 2], warp_loaded_frag_A[warp_mma_k % 2], warp_loaded_frag_B0[warp_mma_k % 2]); warp_mma1.transform(warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B1[warp_mma_k % 2], warp_loaded_frag_A[warp_mma_k % 2], warp_loaded_frag_B1[warp_mma_k % 2]); } if (platform::is_same<typename Operator0::MathOperator, arch::OpMultiplyAddFastF32>::value || platform::is_same<typename Operator0::MathOperator, arch::OpMultiplyAddComplexFastF32>::value) { warp_mma0( tmp_accum0, warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B0[warp_mma_k % 2], tmp_accum0 ); warp_mma1( tmp_accum1, warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B1[warp_mma_k % 2], tmp_accum1 ); if (warp_mma_k == 0) { accum0 = plus_accum(accum0, tmp_accum0); accum1 = plus_accum(accum1, tmp_accum1); tmp_accum0.clear(); tmp_accum1.clear(); } } else { warp_mma0( accum0, warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B0[warp_mma_k % 2], accum0 ); warp_mma1( accum1, warp_transformed_frag_A[warp_mma_k % 2], warp_transformed_frag_B1[warp_mma_k % 2], accum1 ); } // Issue global->shared copies for the this stage if (warp_mma_k < Base::kWarpGemmIterations - 1) { int group_start_iteration_A, group_start_iteration_B; group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA; group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB; copy_tiles_and_advance(iterator_A, iterator_B0, iterator_B1, group_start_iteration_A, group_start_iteration_B); } if (warp_mma_k + 2 == Base::kWarpGemmIterations) { int group_start_iteration_A, group_start_iteration_B; group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA; group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB; copy_tiles_and_advance(iterator_A, iterator_B0, iterator_B1, group_start_iteration_A, group_start_iteration_B); // Inserts a memory fence between stages of cp.async instructions. cutlass::arch::cp_async_fence(); // Waits until stages up to the previous (kStages-2)th stage have committed. 
arch::cp_async_wait<Base::kStages - 2>(); __syncthreads(); // Move to the next stage iterator_A.add_tile_offset({0, 1}); iterator_B0.add_tile_offset({1, 0}); iterator_B1.add_tile_offset({1, 0}); this->smem_iterator_A_.add_tile_offset({0, 1}); this->smem_iterator_B0_.add_tile_offset({1, 0}); this->smem_iterator_B1_.add_tile_offset({1, 0}); // Add negative offsets to return iterators to the 'start' of the // circular buffer in shared memory if (smem_write_stage_idx == (Base::kStages - 1)) { this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); this->smem_iterator_B0_.add_tile_offset({-Base::kStages, 0}); this->smem_iterator_B1_.add_tile_offset({-Base::kStages, 0}); smem_write_stage_idx = 0; } else { ++smem_write_stage_idx; } if (smem_read_stage_idx == (Base::kStages - 1)) { this->warp_tile_iterator_A_.add_tile_offset( {0, -Base::kStages * Policy0::kPartitionsK * Base::kWarpGemmIterations}); this->warp_tile_iterator_B0_.add_tile_offset( {-Base::kStages * Policy0::kPartitionsK * Base::kWarpGemmIterations, 0}); this->warp_tile_iterator_B1_.add_tile_offset( {-Base::kStages * Policy1::kPartitionsK * Base::kWarpGemmIterations, 0}); smem_read_stage_idx = 0; } else { ++smem_read_stage_idx; } --gemm_k_iterations; iterator_A.clear_mask(gemm_k_iterations == 0); iterator_B0.clear_mask(gemm_k_iterations == 0); iterator_B1.clear_mask(gemm_k_iterations == 0); } // Do any conversions feeding the first stage at the end of the loop so // we can start right away on mma instructions if (warp_mma_k + 1 == Base::kWarpGemmIterations) { warp_mma0.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2], warp_transformed_frag_B0[(warp_mma_k + 1) % 2], warp_loaded_frag_A[(warp_mma_k + 1) % 2], warp_loaded_frag_B0[(warp_mma_k + 1) % 2]); warp_mma1.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2], warp_transformed_frag_B1[(warp_mma_k + 1) % 2], warp_loaded_frag_A[(warp_mma_k + 1) % 2], warp_loaded_frag_B1[(warp_mma_k + 1) % 2]); } } } if (platform::is_same<typename Operator0::MathOperator, arch::OpMultiplyAddFastF32>::value || platform::is_same<typename Operator0::MathOperator, arch::OpMultiplyAddComplexFastF32>::value) { accum0 = plus_accum(accum0, tmp_accum0); accum1 = plus_accum(accum1, tmp_accum1); } // commit and drain all pending and predicated cp.async pnz from the GEMM mainloop cutlass::arch::cp_async_fence(); cutlass::arch::cp_async_wait<0>(); __syncthreads(); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
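The mainloop above is dense, but the pipelining idiom it relies on is compact: issue cp.async copies for kStages-1 tiles up front, fence each stage, wait until only the newest stages are still in flight, compute on the oldest stage, then refill the stage that was just freed using circular stage indices. The standalone CUDA sketch below illustrates that idiom with the same cutlass::arch helpers used above; the kernel name, the one-float-per-thread tile, and the running-sum "math" are placeholders of ours, it assumes SM80+ and a launch with a single block of 128 threads, and it is a simplification, not the DualMmaMultistage implementation.

#include "cutlass/arch/memory.h"   // cp_async_zfill, cp_async_fence, cp_async_wait

__global__ void pipelined_sum(float const* gmem, float* out, int num_tiles) {
  constexpr int kStages = 3;
  constexpr int kTile   = 128;                    // one float (4 bytes) per thread per stage
  __shared__ float smem[kStages][kTile];

  float acc = 0.0f;

  // Prologue: issue cp.async for the first kStages-1 tiles, one fence per stage.
  for (int s = 0; s < kStages - 1; ++s) {
    cutlass::arch::cp_async_zfill<4, cutlass::arch::CacheOperation::Always>(
        &smem[s][threadIdx.x], gmem + s * kTile + threadIdx.x, s < num_tiles);
    cutlass::arch::cp_async_fence();              // close this stage's group of cp.async
  }

  int read_stage  = 0;
  int write_stage = kStages - 1;

  for (int tile = 0; tile < num_tiles; ++tile) {
    // Block until at most kStages-2 groups are still pending, i.e. the
    // oldest outstanding stage has definitely landed in shared memory.
    cutlass::arch::cp_async_wait<kStages - 2>();
    __syncthreads();

    acc += smem[read_stage][threadIdx.x];         // "math" on the ready stage

    // Refill the stage freed one iteration ago; it is not the stage being read
    // now, and the wait + syncthreads above ordered its last use.
    int next_tile = tile + kStages - 1;
    cutlass::arch::cp_async_zfill<4, cutlass::arch::CacheOperation::Always>(
        &smem[write_stage][threadIdx.x],
        gmem + next_tile * kTile + threadIdx.x, next_tile < num_tiles);
    cutlass::arch::cp_async_fence();

    read_stage  = (read_stage  + 1) % kStages;    // advance the circular buffer
    write_stage = (write_stage + 1) % kStages;
  }

  // Drain anything still in flight before exiting, as the epilogue above does.
  cutlass::arch::cp_async_wait<0>();
  __syncthreads();

  out[threadIdx.x] = acc;
}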
cutlass/examples/45_dual_gemm/threadblock/dual_mma_multistage.h/0
{ "file_path": "cutlass/examples/45_dual_gemm/threadblock/dual_mma_multistage.h", "repo_id": "cutlass", "token_count": 13243 }
13
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include <cstdlib> #include <cstdio> #include <cassert> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <cute/tensor.hpp> #include "cutlass/util/print_error.hpp" #include "cutlass/util/GPU_Clock.hpp" #include "cutlass/util/helper_cuda.hpp" template <class ProblemShape, class CtaTiler, class TA, class AStride, class ASmemLayout, class AThreadLayout, class TB, class BStride, class BSmemLayout, class BThreadLayout, class TC, class CStride, class CSmemLayout, class CThreadLayout, class Alpha, class Beta> __global__ static __launch_bounds__(decltype(size(CThreadLayout{}))::value) void gemm_device(ProblemShape shape_MNK, CtaTiler cta_tiler, TA const* A, AStride dA, ASmemLayout sA_layout, AThreadLayout tA, TB const* B, BStride dB, BSmemLayout sB_layout, BThreadLayout tB, TC * C, CStride dC, CSmemLayout , CThreadLayout tC, Alpha alpha, Beta beta) { using namespace cute; // Preconditions CUTE_STATIC_ASSERT_V(rank(shape_MNK) == Int<3>{}); // (M, N, K) CUTE_STATIC_ASSERT_V(rank(cta_tiler) == Int<3>{}); // (BLK_M, BLK_N, BLK_K) static_assert(is_static<AThreadLayout>::value); static_assert(is_static<BThreadLayout>::value); static_assert(is_static<CThreadLayout>::value); CUTE_STATIC_ASSERT_V(size(tA) == size(tB)); // NumThreads CUTE_STATIC_ASSERT_V(size(tC) == size(tA)); // NumThreads CUTE_STATIC_ASSERT_V(size<0>(cta_tiler) % size<0>(tA) == Int<0>{}); // BLK_M / THR_M CUTE_STATIC_ASSERT_V(size<2>(cta_tiler) % size<1>(tA) == Int<0>{}); // BLK_K / THR_K CUTE_STATIC_ASSERT_V(size<1>(cta_tiler) % size<0>(tB) == Int<0>{}); // BLK_N / THR_N CUTE_STATIC_ASSERT_V(size<2>(cta_tiler) % size<1>(tB) == Int<0>{}); // BLK_K / THR_K CUTE_STATIC_ASSERT_V(size<0>(cta_tiler) % size<0>(tC) == Int<0>{}); // BLK_M / THR_M 
CUTE_STATIC_ASSERT_V(size<1>(cta_tiler) % size<1>(tC) == Int<0>{}); // BLK_N / THR_N static_assert(is_static<ASmemLayout>::value); static_assert(is_static<BSmemLayout>::value); static_assert(is_static<CSmemLayout>::value); CUTE_STATIC_ASSERT_V(size<0>(ASmemLayout{}) == size<0>(cta_tiler)); // BLK_M CUTE_STATIC_ASSERT_V(size<0>(CSmemLayout{}) == size<0>(cta_tiler)); // BLK_M CUTE_STATIC_ASSERT_V(size<0>(BSmemLayout{}) == size<1>(cta_tiler)); // BLK_N CUTE_STATIC_ASSERT_V(size<1>(CSmemLayout{}) == size<1>(cta_tiler)); // BLK_N CUTE_STATIC_ASSERT_V(size<1>(ASmemLayout{}) == size<2>(cta_tiler)); // BLK_K CUTE_STATIC_ASSERT_V(size<1>(BSmemLayout{}) == size<2>(cta_tiler)); // BLK_K CUTE_STATIC_ASSERT_V(congruent(select<0,2>(shape_MNK), dA)); // dA strides for shape MK CUTE_STATIC_ASSERT_V(congruent(select<1,2>(shape_MNK), dB)); // dB strides for shape NK CUTE_STATIC_ASSERT_V(congruent(select<0,1>(shape_MNK), dC)); // dC strides for shape MN // // Full and Tiled Tensors // // Represent the full tensors Tensor mA = make_tensor(make_gmem_ptr(A), select<0,2>(shape_MNK), dA); // (M,K) Tensor mB = make_tensor(make_gmem_ptr(B), select<1,2>(shape_MNK), dB); // (N,K) Tensor mC = make_tensor(make_gmem_ptr(C), select<0,1>(shape_MNK), dC); // (M,N) // Get the appropriate blocks for this thread block auto cta_coord = make_coord(blockIdx.x, blockIdx.y, _); // (m,n,k) Tensor gA = local_tile(mA, cta_tiler, cta_coord, Step<_1, X,_1>{}); // (BLK_M,BLK_K,k) Tensor gB = local_tile(mB, cta_tiler, cta_coord, Step< X,_1,_1>{}); // (BLK_N,BLK_K,k) Tensor gC = local_tile(mC, cta_tiler, cta_coord, Step<_1,_1, X>{}); // (BLK_M,BLK_N) // Shared memory buffers __shared__ TA smemA[cosize_v<ASmemLayout>]; __shared__ TB smemB[cosize_v<BSmemLayout>]; Tensor sA = make_tensor(make_smem_ptr(smemA), sA_layout); // (BLK_M,BLK_K) Tensor sB = make_tensor(make_smem_ptr(smemB), sB_layout); // (BLK_N,BLK_K) // // Partition the copying of A and B tiles across the threads // // TUTORIAL: Example of simple raked partitioning of ThreadLayouts tA|tB over data A|B tiles Tensor tAgA = local_partition(gA, tA, threadIdx.x); // (THR_M,THR_K,k) Tensor tAsA = local_partition(sA, tA, threadIdx.x); // (THR_M,THR_K) Tensor tBgB = local_partition(gB, tB, threadIdx.x); // (THR_N,THR_K,k) Tensor tBsB = local_partition(sB, tB, threadIdx.x); // (THR_N,THR_K) CUTE_STATIC_ASSERT_V(size<0>(tAgA) == size<0>(tAsA)); // THR_M CUTE_STATIC_ASSERT_V(size<1>(tAgA) == size<1>(tAsA)); // THR_K CUTE_STATIC_ASSERT_V(size<0>(tBgB) == size<0>(tBsB)); // THR_N CUTE_STATIC_ASSERT_V(size<1>(tBgB) == size<1>(tBsB)); // THR_K // // Define A/B partitioning and C accumulators // // TUTORIAL: Example of partitioning via projections of a ThreadLayout tC // Partition sA (M,K) by the rows of tC Tensor tCsA = local_partition(sA, tC, threadIdx.x, Step<_1, X>{}); // (THR_M,BLK_K) // Partition sB (N,K) by the cols of tC Tensor tCsB = local_partition(sB, tC, threadIdx.x, Step< X,_1>{}); // (THR_N,BLK_K) // Partition gC (M,N) by the tile of tC Tensor tCgC = local_partition(gC, tC, threadIdx.x, Step<_1,_1>{}); // (THR_M,THR_N) // Allocate the accumulators -- same shape/layout as the partitioned data Tensor tCrC = make_tensor_like(tCgC); // (THR_M,THR_N) CUTE_STATIC_ASSERT_V(size<0>(tCrC) == size<0>(tCgC)); // THR_M CUTE_STATIC_ASSERT_V(size<0>(tCrC) == size<0>(tCsA)); // THR_M CUTE_STATIC_ASSERT_V(size<1>(tCrC) == size<1>(tCgC)); // THR_N CUTE_STATIC_ASSERT_V(size<1>(tCrC) == size<0>(tCsB)); // THR_N CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCsB)); // BLK_K // Clear the accumulators 
clear(tCrC); #if 0 if(thread0()) { print(" mA : "); print( mA); print("\n"); print(" gA : "); print( gA); print("\n"); print(" sA : "); print( sA); print("\n"); print("tAgA : "); print(tAgA); print("\n"); print("tAsA : "); print(tAsA); print("\n"); } #endif #if 0 if(thread0()) { print(" mB : "); print( mB); print("\n"); print(" gB : "); print( gB); print("\n"); print(" sB : "); print( sB); print("\n"); print("tBgB : "); print(tBgB); print("\n"); print("tBsB : "); print(tBsB); print("\n"); } #endif #if 0 if(thread0()) { print(" mC : "); print( mC); print("\n"); print(" gC : "); print( gC); print("\n"); print("tCsA : "); print(tCsA); print("\n"); print("tCsB : "); print(tCsB); print("\n"); print("tCgC : "); print(tCgC); print("\n"); print("tCrC : "); print(tCrC); print("\n"); } #endif #if 1 // TUTORIAL: Example of a simple mainloop that read tiles of data into shared memory, // and then computes on those tiles. // copy(.) operates on the global and shared memory via the tA|tB partitioning // gemm(.) operates on the shared and register memory via the tC partitioning auto K_TILE_MAX = size<2>(tAgA); for (int k_tile = 0; k_tile < K_TILE_MAX; ++k_tile) { // Copy gmem to smem with tA|tB thread-partitioned tensors copy(tAgA(_,_,k_tile), tAsA); // A (THR_M,THR_K) -> (THR_M,THR_K) copy(tBgB(_,_,k_tile), tBsB); // B (THR_N,THR_K) -> (THR_N,THR_K) // TUTORIAL: The above call to copy(tAgA(_,_,k_tile), tAsA) is equivalent to // Tensor tAgAk = tAgA(_,_,k_tile); // CUTE_UNROLL // for (int i = 0; i < size(tAsA); ++i) { // tAsA(i) = tAgAk(i); // } cp_async_fence(); // Label the end of (potential) cp.async instructions cp_async_wait<0>(); // Sync on all (potential) cp.async instructions __syncthreads(); // Wait for all threads to write to smem // Compute gemm on tC thread-partitioned smem gemm(tCsA, tCsB, tCrC); // (THR_M,THR_N) += (THR_M,BLK_K) * (THR_N,BLK_K) // TUTORIAL: The above call to gemm(tCsA, tCsB, tCrC) is equivalent to // CUTE_UNROLL // for (int k = 0; k < size<1>(tCsA); ++k) { // CUTE_UNROLL // for (int m = 0; m < size<0>(tCrC); ++m) { // CUTE_UNROLL // for (int n = 0; n < size<1>(tCrC); ++n) { // tCrC(m,n) += tCsA(m,k) * tCsB(n,k); // } // } // } __syncthreads(); // Wait for all threads to read from smem } #endif // // Epilogue // axpby(alpha, tCrC, beta, tCgC); // TUTORIAL: The above call to axpby(alpha, tCrC, beta, tCgC) is equivalent to // CUTE_UNROLL // for (int i = 0; i < size(tCsA); ++i) { // tCgC(i) = alpha * tCrC(i) + beta * tCgC(i); // } } // Setup params for an NT GEMM // Use m-major smem sA, n-major smem sB, and mn-major threads tA|tB template <class TA, class TB, class TC, class Alpha, class Beta> void gemm_nt(int m, int n, int k, Alpha alpha, TA const* A, int ldA, TB const* B, int ldB, Beta beta, TC * C, int ldC, cudaStream_t stream = 0) { using namespace cute; // Define shapes (dynamic) auto M = int(m); auto N = int(n); auto K = int(k); auto prob_shape = make_shape(M, N, K); // (M, N, K) // Define NT strides (mixed) auto dA = make_stride(Int<1>{}, ldA); // (dM, dK) auto dB = make_stride(Int<1>{}, ldB); // (dN, dK) auto dC = make_stride(Int<1>{}, ldC); // (dM, dN) // Define CTA tile sizes (static) auto bM = Int<128>{}; auto bN = Int<128>{}; auto bK = Int< 8>{}; auto cta_tiler = make_shape(bM, bN, bK); // (BLK_M, BLK_N, BLK_K) // Define the smem layouts (static) auto sA = make_layout(make_shape(bM, bK)); // (m,k) -> smem_idx; m-major auto sB = make_layout(make_shape(bN, bK)); // (n,k) -> smem_idx; n-major auto sC = make_layout(make_shape(bM, bN)); // (m,n) -> smem_idx; m-major // 
Define the thread layouts (static) auto tA = make_layout(make_shape(Int<32>{}, Int< 8>{})); // (m,k) -> thr_idx auto tB = make_layout(make_shape(Int<32>{}, Int< 8>{})); // (n,k) -> thr_idx auto tC = make_layout(make_shape(Int<16>{}, Int<16>{})); // (m,n) -> thr_idx dim3 dimBlock(size(tC)); dim3 dimGrid(size(ceil_div(M, bM)), size(ceil_div(N, bN))); gemm_device<<<dimGrid, dimBlock, 0, stream>>> (prob_shape, cta_tiler, A, dA, sA, tA, B, dB, sB, tB, C, dC, sC, tC, alpha, beta); } // Setup params for a TN GEMM // Use padded m-major smem sA, padded n-major smem sB, and k-major threads tA|tB template <class TA, class TB, class TC, class Alpha, class Beta> void gemm_tn(int m, int n, int k, Alpha alpha, TA const* A, int ldA, TB const* B, int ldB, Beta beta, TC * C, int ldC, cudaStream_t stream = 0) { using namespace cute; // Define shapes (dynamic) auto M = int(m); auto N = int(n); auto K = int(k); auto prob_shape = make_shape(M, N, K); // (M, N, K) // Define TN strides (mixed) auto dA = make_stride(ldA, Int<1>{}); // (dM, dK) auto dB = make_stride(ldB, Int<1>{}); // (dN, dK) auto dC = make_stride(Int<1>{}, ldC); // (dM, dN) // Define CTA tile sizes (static) auto bM = Int<128>{}; auto bN = Int<128>{}; auto bK = Int< 8>{}; auto cta_tiler = make_shape(bM, bN, bK); // (BLK_M, BLK_N, BLK_K) // Define the smem layouts (static) auto sA = make_layout(make_shape(bM,bK), LayoutRight{}); // (m,k) -> smem_idx; k-major auto sB = make_layout(make_shape(bN,bK), LayoutRight{}); // (n,k) -> smem_idx; k-major auto sC = make_layout(make_shape(bM, bN)); // (m,n) -> smem_idx; m-major // Define the thread layouts (static) auto tA = make_layout(make_shape(Int<32>{}, Int< 8>{}), LayoutRight{}); // (m,k) -> thr_idx; k-major auto tB = make_layout(make_shape(Int<32>{}, Int< 8>{}), LayoutRight{}); // (n,k) -> thr_idx; k-major auto tC = make_layout(make_shape(Int<16>{}, Int<16>{})); // (m,n) -> thr_idx; m-major dim3 dimBlock(size(tC)); dim3 dimGrid(size(ceil_div(M, bM)), size(ceil_div(N, bN))); gemm_device<<<dimGrid, dimBlock, 0, stream>>> (prob_shape, cta_tiler, A, dA, sA, tA, B, dB, sB, tB, C, dC, sC, tC, alpha, beta); } template <class TA, class TB, class TC, class Alpha, class Beta> void gemm(char transA, char transB, int m, int n, int k, Alpha alpha, TA const* A, int ldA, TB const* B, int ldB, Beta beta, TC * C, int ldC, cudaStream_t stream = 0) { if (transA == 'N' && transB == 'T') { return gemm_nt(m, n, k, alpha, A, ldA, B, ldB, beta, C, ldC, stream); } else if (transA == 'T' && transB == 'N') { return gemm_tn(m, n, k, alpha, A, ldA, B, ldB, beta, C, ldC, stream); } assert(false && "Not implemented"); } int main(int argc, char** argv) { int m = 5120; if (argc >= 2) sscanf(argv[1], "%d", &m); int n = 5120; if (argc >= 3) sscanf(argv[2], "%d", &n); int k = 4096; if (argc >= 4) sscanf(argv[3], "%d", &k); char transA = 'N'; if (argc >= 5) sscanf(argv[4], "%c", &transA); char transB = 'T'; if (argc >= 6) sscanf(argv[5], "%c", &transB); using TA = float; using TB = float; using TC = float; using TI = float; TI alpha = 1.0; TI beta = 0.0; std::cout << "M = " << m << std::endl; std::cout << "N = " << n << std::endl; std::cout << "K = " << k << std::endl; std::cout << "C = A^" << transA << " B^" << transB << std::endl; cute::device_init(0); thrust::host_vector<TA> h_A(m*k); thrust::host_vector<TB> h_B(n*k); thrust::host_vector<TC> h_C(m*n); for (int j = 0; j < m*k; ++j) h_A[j] = static_cast<TA>( 2*(rand() / double(RAND_MAX)) - 1 ); for (int j = 0; j < n*k; ++j) h_B[j] = static_cast<TB>( 2*(rand() / double(RAND_MAX)) - 1 ); for 
(int j = 0; j < m*n; ++j) h_C[j] = static_cast<TC>(-1); thrust::device_vector<TA> d_A = h_A; thrust::device_vector<TB> d_B = h_B; thrust::device_vector<TC> d_C = h_C; double gflops = (2.0*m*n*k) * 1e-9; const int timing_iterations = 100; GPU_Clock timer; int ldA = 0, ldB = 0, ldC = m; if (transA == 'N') { ldA = m; } else if (transA == 'T') { ldA = k; } else { assert(false); } if (transB == 'N') { ldB = k; } else if (transB == 'T') { ldB = n; } else { assert(false); } // Run once d_C = h_C; gemm(transA, transB, m, n, k, alpha, d_A.data().get(), ldA, d_B.data().get(), ldB, beta, d_C.data().get(), ldC); CUTE_CHECK_LAST(); thrust::host_vector<TC> cute_result = d_C; // Timing iterations timer.start(); for (int i = 0; i < timing_iterations; ++i) { gemm(transA, transB, m, n, k, alpha, d_A.data().get(), ldA, d_B.data().get(), ldB, beta, d_C.data().get(), ldC); } double cute_time = timer.seconds() / timing_iterations; CUTE_CHECK_LAST(); printf("CUTE_GEMM: [%6.1f]GFlop/s (%6.4f)ms\n", gflops / cute_time, cute_time*1000); return 0; }
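The tutorial's main() times the kernel but never verifies the result. Below is a hedged sketch of a host-side reference check one could append after `cute_result` is copied back; it is not part of the tutorial. It assumes the default NT path (transA == 'N', transB == 'T') with the strides gemm_nt sets up, i.e. A(i,k) = A[i + k*ldA], B(j,k) = B[j + k*ldB], C(i,j) = C[i + j*ldC], and beta == 0; the function name and tolerance are ours, and the O(m*n*k) host loop is only practical for small problem sizes.

#include <cmath>
#include <cstdio>

template <class TA, class TB, class TC, class TI>
int check_nt(int m, int n, int k, TI alpha,
             TA const* A, int ldA,     // e.g. h_A.data()
             TB const* B, int ldB,     // e.g. h_B.data()
             TC const* C, int ldC,     // e.g. cute_result.data()
             double tol = 1e-3) {
  int errors = 0;
  for (int j = 0; j < n; ++j) {
    for (int i = 0; i < m; ++i) {
      double ref = 0.0;
      for (int p = 0; p < k; ++p) {
        ref += double(A[i + p * ldA]) * double(B[j + p * ldB]);   // NT: A m-major, B n-major
      }
      ref *= double(alpha);                                       // beta == 0 in main()
      double got = double(C[i + j * ldC]);
      if (std::abs(got - ref) > tol * (1.0 + std::abs(ref))) {
        if (++errors <= 10) {
          std::printf("mismatch at (%d,%d): got %f, expected %f\n", i, j, got, ref);
        }
      }
    }
  }
  return errors;   // 0 means the CuTe result matched the naive reference
}

A possible call site, right after `thrust::host_vector<TC> cute_result = d_C;`, would be `int errors = check_nt(m, n, k, alpha, h_A.data(), ldA, h_B.data(), ldB, cute_result.data(), ldC);`.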
cutlass/examples/cute/tutorial/sgemm_1.cu/0
{ "file_path": "cutlass/examples/cute/tutorial/sgemm_1.cu", "repo_id": "cutlass", "token_count": 8242 }
14
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/util/type_traits.hpp> #include <cute/numeric/integral_constant.hpp> // cute::true_type, cute::false_type #include <cute/numeric/integer_sequence.hpp> #include <cute/container/cuda_types.hpp> //#include <cute/container/array.hpp> // Advanced optimizations // // cute::tuple is like std::tuple, with two differences. // // 1. It works on both host and device. // 2. Its template arguments must be semiregular types. // // Semiregular types are default constructible and copyable. // They include "value types" like int or float, // but do _not_ include references like int& or float&. // (See std::tie for an example of a tuple of references.) // // This is simplified over the implementations in std::, cuda::std::, and thrust:: by ignoring much of // the conversion SFINAE, special overloading, and avoiding cvref template types. // Furthermore, the empty base optimization (EBO) is MORE aggressive by avoiding // construction calls, and ignoring any need for unique element addresses. // // Over standard-conforming tuple implementations, this appears to accelerate compilation times by over 3x. namespace cute { namespace detail { // EBO stands for "empty base optimization." // We use this technique to ensure that cute::tuple // doesn't need to waste space storing any template arguments // of cute::tuple that have no data (like integral_constant). // Otherwise, cute::tuple would need to spend at least 1 byte // for each of its template arguments. // // EBO always "holds" a single value of type T. // N is like an array index that TupleBase uses // to access the desired tuple element. 
template <size_t N, class T, bool IsEmpty = is_empty<T>::value> struct EBO; template <class T, size_t N, bool B> CUTE_HOST_DEVICE constexpr C<N> findt(EBO<N, T, B> const&) { return {}; } // Specialization for types T that have no data; // the "static tuple leaf." Valid T here include // integral_constant<U, Value>, Int<Value>, // and any other semiregular type // for which std::is_empty_v<T> is true. template <size_t N, class T> struct EBO<N, T, true> { CUTE_HOST_DEVICE constexpr EBO() {} CUTE_HOST_DEVICE constexpr EBO(T const&) {} }; template <size_t N, class T> CUTE_HOST_DEVICE constexpr T getv(EBO<N, T, true> const&) { return {}; } // Specialization for types T that are not empty; // the "dynamic tuple leaf." Valid T here include int, // any other integral or floating-point type, // or any semiregular type for which std::is_empty_v<T> is false. template <size_t N, class T> struct EBO<N, T, false> { CUTE_HOST_DEVICE constexpr EBO() : t_{} {} template <class U> CUTE_HOST_DEVICE constexpr EBO(U const& u) : t_{u} {} T t_; }; template <size_t N, class T> CUTE_HOST_DEVICE constexpr T const& getv(EBO<N, T, false> const& x) { return x.t_; } template <size_t N, class T> CUTE_HOST_DEVICE constexpr T& getv(EBO<N, T, false>& x) { return x.t_; } template <size_t N, class T> CUTE_HOST_DEVICE constexpr T&& getv(EBO<N, T, false>&& x) { return cute::move(x.t_); } template <class IdxSeq, class... T> struct TupleBase; // Base class of cute::tuple binds each element to an index // by inheriting from EBO<i, t> for each (i, t) in (I..., T...). // The storage (for nonempty t) lives in the base classes. template <size_t... I, class... T> struct TupleBase<index_sequence<I...>, T...> : EBO<I,T>... { CUTE_HOST_DEVICE constexpr TupleBase() {} template <class... U> CUTE_HOST_DEVICE constexpr explicit TupleBase(U const&... u) : EBO<I,T>(u)... {} template <class... U> CUTE_HOST_DEVICE constexpr TupleBase(TupleBase<index_sequence<I...>, U...> const& u) : EBO<I,T>(getv(static_cast<EBO<I,U> const&>(u)))... {} }; } // end namespace detail // Attempting to use the following commented-out alias // in the declaration of `struct tuple` causes MSVC 2022 build errors. // //template <class... T> //using TupleBase = detail::TupleBase<make_index_sequence<sizeof...(T)>, T...>; // This is the actual cute::tuple class. // The storage (if any) lives in TupleBase's EBO base classes. // // Inheriting from the above alias TupleBase // causes MSVC 2022 build errors when assigning one tuple to another: // In summary: this is verbose as a work-around for MSVC build errors. template <class... T> struct tuple : detail::TupleBase<make_index_sequence<sizeof...(T)>, T...> { CUTE_HOST_DEVICE constexpr tuple() {} template <class... U> CUTE_HOST_DEVICE constexpr tuple(U const&... u) : detail::TupleBase<make_index_sequence<sizeof...(T)>, T...>(u...) {} template <class... U> CUTE_HOST_DEVICE constexpr tuple(tuple<U...> const& u) : detail::TupleBase<make_index_sequence<sizeof...(T)>, T...>(static_cast<detail::TupleBase<make_index_sequence<sizeof...(U)>, U...> const&>(u)) {} }; // // get for cute::tuple (just like std::get for std::tuple) // template <size_t I, class... T> CUTE_HOST_DEVICE constexpr decltype(auto) get(tuple<T...> const& t) noexcept { static_assert(I < sizeof...(T), "Index out of range"); return detail::getv<I>(t); } template <size_t I, class... T> CUTE_HOST_DEVICE constexpr decltype(auto) get(tuple<T...>& t) noexcept { static_assert(I < sizeof...(T), "Index out of range"); return detail::getv<I>(t); } template <size_t I, class... 
T> CUTE_HOST_DEVICE constexpr decltype(auto) get(tuple<T...>&& t) noexcept { static_assert(I < sizeof...(T), "Index out of range"); return detail::getv<I>(static_cast<tuple<T...>&&>(t)); } // // find a type X within a cute::tuple // Requires X to be unique in tuple // Returns a static integer // template <class X, class... T> CUTE_HOST_DEVICE constexpr auto find(tuple<T...> const& t) noexcept { return detail::findt<X>(t); } // // Custom is_tuple trait simply checks the existence of tuple_size // and assumes std::get<I>(.), std::tuple_element<I,.> // namespace detail { template <class T> auto has_tuple_size( T*) -> bool_constant<(0 <= tuple_size<T>::value)>; auto has_tuple_size(...) -> false_type; } // end namespace detail template <class T> struct is_tuple : decltype(detail::has_tuple_size((T*)0)) {}; // // make_tuple (value-based implementation) // template <class... T> CUTE_HOST_DEVICE constexpr tuple<T...> make_tuple(T const&... t) { return {t...}; } // // tuple_cat concatenates multiple cute::tuple into a single cute::tuple, // just like std::tuple_cat for std::tuple. // #if 0 // Original implementation namespace detail { template <class T0, class T1, size_t... I0, size_t... I1> CUTE_HOST_DEVICE constexpr auto tuple_cat(T0 const& t0, T1 const& t1, index_sequence<I0...>, index_sequence<I1...>) { return cute::make_tuple(get<I0>(t0)..., get<I1>(t1)...); } } // end namespace detail CUTE_HOST_DEVICE constexpr tuple<> tuple_cat() { return {}; } template <class Tuple, __CUTE_REQUIRES(is_tuple<Tuple>::value)> CUTE_HOST_DEVICE constexpr Tuple const& tuple_cat(Tuple const& t) { return t; } template <class T0, class T1> CUTE_HOST_DEVICE constexpr auto tuple_cat(T0 const& t0, T1 const& t1) { return detail::tuple_cat(t0, t1, make_index_sequence<tuple_size<T0>::value>{}, make_index_sequence<tuple_size<T1>::value>{}); } template <class T0, class T1, class T2, class... Ts> CUTE_HOST_DEVICE constexpr auto tuple_cat(T0 const& t0, T1 const& t1, T2 const& t2, Ts const&... ts) { return cute::tuple_cat(cute::tuple_cat(t0,t1),t2,ts...); } #endif #if 1 // Extended implementation namespace detail { template <class T0, class T1, size_t... I0, size_t... I1> CUTE_HOST_DEVICE constexpr auto tuple_cat(T0 const& t0, T1 const& t1, index_sequence<I0...>, index_sequence<I1...>) { return cute::make_tuple(get<I0>(t0)..., get<I1>(t1)...); } template <class T0, class T1, class T2, size_t... I0, size_t... I1, size_t... I2> CUTE_HOST_DEVICE constexpr auto tuple_cat(T0 const& t0, T1 const& t1, T2 const& t2, index_sequence<I0...>, index_sequence<I1...>, index_sequence<I2...>) { return cute::make_tuple(get<I0>(t0)..., get<I1>(t1)..., get<I2>(t2)...); } template <class T0, class T1, class T2, class T3, size_t... I0, size_t... I1, size_t... I2, size_t... I3> CUTE_HOST_DEVICE constexpr auto tuple_cat(T0 const& t0, T1 const& t1, T2 const& t2, T3 const& t3, index_sequence<I0...>, index_sequence<I1...>, index_sequence<I2...>, index_sequence<I3...>) { return cute::make_tuple(get<I0>(t0)..., get<I1>(t1)..., get<I2>(t2)..., get<I3>(t3)...); } template <class T0, class T1, class T2, class T3, class T4, size_t... I0, size_t... I1, size_t... I2, size_t... I3, size_t... 
I4> CUTE_HOST_DEVICE constexpr auto tuple_cat(T0 const& t0, T1 const& t1, T2 const& t2, T3 const& t3, T4 const& t4, index_sequence<I0...>, index_sequence<I1...>, index_sequence<I2...>, index_sequence<I3...>, index_sequence<I4...>) { return cute::make_tuple(get<I0>(t0)..., get<I1>(t1)..., get<I2>(t2)..., get<I3>(t3)..., get<I4>(t4)...); } template <class T0, class T1> struct tuple_cat_static; template <class... T0s, class... T1s> struct tuple_cat_static<tuple<T0s...>, tuple<T1s...>> { using type = tuple<T0s..., T1s...>; }; } // end namespace detail CUTE_HOST_DEVICE constexpr tuple<> tuple_cat() { return {}; } template <class Tuple, __CUTE_REQUIRES(is_tuple<Tuple>::value)> CUTE_HOST_DEVICE constexpr Tuple const& tuple_cat(Tuple const& t) { return t; } template <class T0, class T1> CUTE_HOST_DEVICE constexpr auto tuple_cat(T0 const& t0, T1 const& t1) { if constexpr (is_static<T0>::value && is_static<T1>::value && is_tuple<T0>::value && is_tuple<T1>::value) { return typename detail::tuple_cat_static<T0, T1>::type{}; } else { return detail::tuple_cat(t0, t1, make_index_sequence<tuple_size<T0>::value>{}, make_index_sequence<tuple_size<T1>::value>{}); } CUTE_GCC_UNREACHABLE; } template <class T0, class T1, class T2> CUTE_HOST_DEVICE constexpr auto tuple_cat(T0 const& t0, T1 const& t1, T2 const& t2) { return detail::tuple_cat(t0, t1, t2, make_index_sequence<tuple_size<T0>::value>{}, make_index_sequence<tuple_size<T1>::value>{}, make_index_sequence<tuple_size<T2>::value>{}); } template <class T0, class T1, class T2, class T3> CUTE_HOST_DEVICE constexpr auto tuple_cat(T0 const& t0, T1 const& t1, T2 const& t2, T3 const& t3) { return detail::tuple_cat(t0, t1, t2, t3, make_index_sequence<tuple_size<T0>::value>{}, make_index_sequence<tuple_size<T1>::value>{}, make_index_sequence<tuple_size<T2>::value>{}, make_index_sequence<tuple_size<T3>::value>{}); } template <class T0, class T1, class T2, class T3, class T4> CUTE_HOST_DEVICE constexpr auto tuple_cat(T0 const& t0, T1 const& t1, T2 const& t2, T3 const& t3, T4 const& t4) { return detail::tuple_cat(t0, t1, t2, t3, t4, make_index_sequence<tuple_size<T0>::value>{}, make_index_sequence<tuple_size<T1>::value>{}, make_index_sequence<tuple_size<T2>::value>{}, make_index_sequence<tuple_size<T3>::value>{}, make_index_sequence<tuple_size<T4>::value>{}); } template <class T0, class T1, class T2, class T3, class T4, class T5, class... Ts> CUTE_HOST_DEVICE constexpr auto tuple_cat(T0 const& t0, T1 const& t1, T2 const& t2, T3 const& t3, T4 const& t4, T5 const& t5, Ts const&... ts) { return cute::tuple_cat(cute::tuple_cat(t0,t1,t2,t3,t4), cute::tuple_cat(t5, ts...)); } #endif #if 0 // Outer-Inner indexing trick to concat all tuples at once namespace detail { template <size_t... Ns> struct tuple_cat_helper { static constexpr cute::array<size_t,sizeof...(Ns)> ns = {Ns...}; static constexpr size_t total_size() { size_t sum = 0; for (size_t n : ns) sum += n; return sum; } static constexpr size_t total_size_ = total_size(); static constexpr auto values() { cute::array<size_t[2],total_size_> outer_inner = {}; size_t idx = 0; for (size_t i = 0; i < ns.size(); ++i) { for (size_t j = 0; j < ns[i]; ++j, ++idx) { outer_inner[idx][0] = i; outer_inner[idx][1] = j; } } return outer_inner; } static constexpr auto outer_inner_ = values(); using total_sequence = make_index_sequence<total_size_>; }; template <class Helper, class Tuple, size_t... 
I> CUTE_HOST_DEVICE constexpr auto tuple_cat(Tuple const& t, index_sequence<I...>) { return cute::make_tuple(get<Helper::outer_inner_[I][1]>(get<Helper::outer_inner_[I][0]>(t))...); } template <class T0, class T1, size_t... I0, size_t... I1> CUTE_HOST_DEVICE constexpr auto tuple_cat(T0 const& t0, T1 const& t1, index_sequence<I0...>, index_sequence<I1...>) { return cute::make_tuple(get<I0>(t0)..., get<I1>(t1)...); } } // end namespace detail CUTE_HOST_DEVICE constexpr tuple<> tuple_cat() { return {}; } template <class Tuple, __CUTE_REQUIRES(is_tuple<Tuple>::value)> CUTE_HOST_DEVICE constexpr Tuple const& tuple_cat(Tuple const& t) { return t; } template <class T0, class T1> CUTE_HOST_DEVICE constexpr auto tuple_cat(T0 const& t0, T1 const& t1) { return detail::tuple_cat(t0, t1, make_index_sequence<tuple_size<T0>::value>{}, make_index_sequence<tuple_size<T1>::value>{}); } template <class... Tuples> CUTE_HOST_DEVICE constexpr auto tuple_cat(Tuples const&... ts) { using Helper = detail::tuple_cat_helper<tuple_size<Tuples>::value...>; return detail::tuple_cat<Helper>(cute::make_tuple(ts...), typename Helper::total_sequence{}); } #endif // // Equality operators // namespace detail { template <size_t I, class TupleA, class TupleB> CUTE_HOST_DEVICE constexpr auto equal_impl(TupleA const& a, TupleB const& b) { if constexpr (I == tuple_size<TupleA>::value) { return cute::true_type{}; // Terminal: TupleA is exhausted } else if constexpr (I == tuple_size<TupleB>::value) { return cute::false_type{}; // Terminal: TupleA is not exhausted, TupleB is exhausted } else { return (get<I>(a) == get<I>(b)) && equal_impl<I+1>(a,b); } CUTE_GCC_UNREACHABLE; } } // end namespace detail template <class TupleT, class TupleU, __CUTE_REQUIRES(is_tuple<TupleT>::value && is_tuple<TupleU>::value)> CUTE_HOST_DEVICE constexpr auto operator==(TupleT const& t, TupleU const& u) { return detail::equal_impl<0>(t, u); } template <class TupleT, class TupleU, __CUTE_REQUIRES(is_tuple<TupleT>::value ^ is_tuple<TupleU>::value)> CUTE_HOST_DEVICE constexpr auto operator==(TupleT const& t, TupleU const& u) { return cute::false_type{}; } template <class TupleT, class TupleU, __CUTE_REQUIRES(is_tuple<TupleT>::value && is_tuple<TupleU>::value)> CUTE_HOST_DEVICE constexpr auto operator!=(TupleT const& t, TupleU const& u) { return !(t == u); } template <class TupleT, class TupleU, __CUTE_REQUIRES(is_tuple<TupleT>::value ^ is_tuple<TupleU>::value)> CUTE_HOST_DEVICE constexpr auto operator!=(TupleT const& t, TupleU const& u) { return cute::true_type{}; } // // Comparison operators // // // There are many ways to compare tuple of elements and because CuTe is built // on parameterizing layouts of coordinates, some comparisons are appropriate // only in certain cases. // -- lexicographical comparison [reverse, reflected, revref] // -- colexicographical comparison [reverse, reflected, revref] // -- element-wise comparison [any,all] // This can be very confusing. To avoid errors in selecting the appropriate // comparison, op<|op<=|op>|op>= are *not* implemented for cute::tuple. // // That said, see int_tuple for more explicitly named common comparison ops. // // // Display utilities // namespace detail { template <class Tuple, size_t... Is> CUTE_HOST_DEVICE void print_tuple(Tuple const& t, index_sequence<Is...>, char s = '(', char e = ')') { using cute::print; ((void(print(Is == 0 ? s : ',')), void(print(get<Is>(t)))), ...); print(e); } #if !defined(__CUDACC_RTC__) template <class Tuple, std::size_t... 
Is> CUTE_HOST std::ostream& print_tuple_os(std::ostream& os, Tuple const& t, index_sequence<Is...>, char s = '(', char e = ')') { (void(os << (Is == 0 ? s : ',') << get<Is>(t)), ...); return os << e; } #endif // !defined(__CUDACC_RTC__) } // end namespace detail template <class Tuple, __CUTE_REQUIRES(is_tuple<Tuple>::value)> CUTE_HOST_DEVICE void print(Tuple const& t) { return detail::print_tuple(t, make_index_sequence<tuple_size<Tuple>::value>{}); } #if !defined(__CUDACC_RTC__) template <class Tuple, __CUTE_REQUIRES(is_tuple<Tuple>::value)> CUTE_HOST std::ostream& operator<<(std::ostream& os, Tuple const& t) { return detail::print_tuple_os(os, t, make_index_sequence<tuple_size<Tuple>::value>{}); } #endif // !defined(__CUDACC_RTC__) } // end namespace cute namespace CUTE_STL_NAMESPACE { template <class... T> struct tuple_size<cute::tuple<T...>> : CUTE_STL_NAMESPACE::integral_constant<size_t, sizeof...(T)> {}; template <size_t I, class... T> struct tuple_element<I, cute::tuple<T...>> : CUTE_STL_NAMESPACE::tuple_element<I, CUTE_STL_NAMESPACE::tuple<T...>> {}; template <class... T> struct tuple_size<const cute::tuple<T...>> : CUTE_STL_NAMESPACE::integral_constant<size_t, sizeof...(T)> {}; template <size_t I, class... T> struct tuple_element<I, const cute::tuple<T...>> : CUTE_STL_NAMESPACE::tuple_element<I, const CUTE_STL_NAMESPACE::tuple<T...>> {}; } // end namespace CUTE_STL_NAMESPACE // // std compatibility // #ifdef CUTE_STL_NAMESPACE_IS_CUDA_STD namespace std { #if defined(__CUDACC_RTC__) template <class... _Tp> struct tuple_size; template <size_t _Ip, class... _Tp> struct tuple_element; #endif template <class... T> struct tuple_size<cute::tuple<T...>> : CUTE_STL_NAMESPACE::integral_constant<size_t, sizeof...(T)> {}; template <size_t I, class... T> struct tuple_element<I, cute::tuple<T...>> : CUTE_STL_NAMESPACE::tuple_element<I, CUTE_STL_NAMESPACE::tuple<T...>> {}; template <class... T> struct tuple_size<const cute::tuple<T...>> : CUTE_STL_NAMESPACE::integral_constant<size_t, sizeof...(T)> {}; template <size_t I, class... T> struct tuple_element<I, const cute::tuple<T...>> : CUTE_STL_NAMESPACE::tuple_element<I, const CUTE_STL_NAMESPACE::tuple<T...>> {}; } // end namespace std #endif // CUTE_STL_NAMESPACE_IS_CUDA_STD
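// -----------------------------------------------------------------------------------------------
// [Illustrative sketch appended by the editor -- not part of the original header.]
// A minimal usage example of cute::tuple under the assumptions documented above (semiregular
// element types, Int<> from <cute/numeric/integral_constant.hpp>). The function name
// tuple_usage_sketch is hypothetical.
// -----------------------------------------------------------------------------------------------
// #include <cute/container/tuple.hpp>
// #include <cute/numeric/integral_constant.hpp>

inline void tuple_usage_sketch()
{
  using namespace cute;

  // Mix of "static" (empty, EBO'd) and "dynamic" (stored) elements:
  // Int<8> contributes no storage, the two ints live in the dynamic EBO leaves.
  auto t = make_tuple(Int<8>{}, 3, 5);              // tuple<Int<8>, int, int>

  auto s = get<0>(t);                               // Int<8>, recovered as a compile-time value
  int  x = get<1>(t);                               // 3
  get<2>(t) = 7;                                    // dynamic leaves are assignable through get

  // Concatenation mirrors std::tuple_cat.
  auto c = tuple_cat(t, make_tuple(2.0f));          // tuple<Int<8>, int, int, float>
  static_assert(tuple_size<decltype(c)>::value == 4, "");

  // find<X> returns the static index of a (unique) element type X.
  auto idx = find<Int<8>>(t);                       // C<0>
  (void) s; (void) x; (void) idx;
}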
cutlass/include/cute/container/tuple.hpp/0
{ "file_path": "cutlass/include/cute/container/tuple.hpp", "repo_id": "cutlass", "token_count": 8428 }
15
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include <cute/config.hpp> #include <cute/arch/util.hpp> // cast_smem_ptr_to_uint #include <cute/pointer.hpp> #include <cute/pointer_swizzle.hpp> #include <cute/swizzle_layout.hpp> #include <cute/tensor.hpp> namespace cute { // // Stand-in Swizzle Layout // A model of a nullptr smem_ptr<T> with B == sizeof_bits<T>::value // That represents an unset pointer. 
This is a placeholder type that is waiting for an smem_ptr // template <int Bits> struct smem_ptr_flag_bits : Int<0> {}; using smem_ptr_flag = smem_ptr_flag_bits<1>; // A flagged construction method to transform ComposedLayout // Make a swizzle pointer tensor and check that the intended type size matches template <class Iterator, class SwizzleFn, int B, class Layout> CUTE_HOST_DEVICE constexpr auto make_tensor(Iterator const& ptr, ComposedLayout<SwizzleFn,smem_ptr_flag_bits<B>,Layout> const& layout) { static_assert(is_smem<Iterator>::value, "Expected smem."); static_assert(B == sizeof_bits<iter_value_t<Iterator>>::value, "Expected a B-bit pointer type."); return make_tensor(make_smem_ptr(ptr.get(), layout.layout_a()), layout.layout_b()); } // NOTE: To preserve smem_ptr_flag_bits under recast ops template <int N, class SwizzleFn, int B, class Layout> CUTE_HOST_DEVICE constexpr auto upcast(ComposedLayout<SwizzleFn,smem_ptr_flag_bits<B>,Layout> const& layout) { return composition(layout.layout_a(), smem_ptr_flag_bits<B*N>{}, upcast<N>(layout.layout_b())); } template <int N, class SwizzleFn, int B, class Layout> CUTE_HOST_DEVICE constexpr auto downcast(ComposedLayout<SwizzleFn,smem_ptr_flag_bits<B>,Layout> const& layout) { return composition(layout.layout_a(), smem_ptr_flag_bits<B/N>{}, downcast<N>(layout.layout_b())); } // // Conversion with swizzle_layout // template <class SwizzleFn, int B, class Layout> CUTE_HOST_DEVICE auto as_position_independent_swizzle_layout(ComposedLayout<SwizzleFn,smem_ptr_flag_bits<B>,Layout> const& layout) { return composition(recast_layout<uint8_t,uint_bit_t<B>>(layout.layout_a()), Int<0>{}, layout.layout_b()); } template <class Tensor> CUTE_HOST_DEVICE auto as_position_independent_swizzle_tensor(Tensor&& tensor) { static_assert(is_smem<remove_cvref_t<Tensor>>::value, "Expected smem tensor."); using SwizzleFn = get_swizzle_t<remove_cvref_t<Tensor>>; if constexpr (SwizzleFn::num_bits == 0) { return tensor; } else { #if !defined(NDEBUG) { uint32_t address = cast_smem_ptr_to_uint(raw_pointer_cast(static_cast<Tensor&&>(tensor).data())); uint32_t mask = ((uint32_t(1) << SwizzleFn::num_base) - 1) | SwizzleFn::swizzle_code; assert((address & mask) == 0); // Alignment to the Base, Z, and Y of Swizzle } #endif using T = typename remove_cvref_t<Tensor>::value_type; // Recast swizzle from acting on byte-addressed pointers to elements of type-T auto new_swizzle = recast_layout<uint8_t, T>(SwizzleFn{}); // Strip off everything and create a new smem_ptr for type-T auto new_ptr = make_smem_ptr<T>(raw_pointer_cast(static_cast<Tensor&&>(tensor).data())); return make_tensor(new_ptr, composition(new_swizzle, Int<0>{}, tensor.layout())); } CUTE_GCC_UNREACHABLE; } // // Display utilities // // Capture and cast smem_ptr_flag Layouts to offset-0 layouts template <class SwizzleFn, int B, class Layout> CUTE_HOST_DEVICE void print_layout(ComposedLayout<SwizzleFn,smem_ptr_flag_bits<B>,Layout> const& layout) { print_layout(as_position_independent_swizzle_layout(layout)); } template <class SwizzleFn, int B, class Layout> CUTE_HOST_DEVICE void print_latex(ComposedLayout<SwizzleFn,smem_ptr_flag_bits<B>,Layout> const& layout) { print_latex(as_position_independent_swizzle_layout(layout)); } template <int B> CUTE_HOST_DEVICE void print(smem_ptr_flag_bits<B> ptr) { printf("smem_ptr[%db](unset)", B); } } // end namespace cute
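// -----------------------------------------------------------------------------------------------
// [Illustrative sketch appended by the editor -- not part of the original header.]
// A hypothetical example of how the flag is used: a swizzled smem layout is described before any
// pointer exists, and the make_tensor() overload above later replaces the flag with a real
// swizzled shared-memory pointer. Swizzle<3,3,3>, the (8,64) shape and the kernel name are
// illustrative choices only.
// -----------------------------------------------------------------------------------------------
// Assumes <cute/tensor.hpp> and <cutlass/numeric_types.h> (for cutlass::half_t) are available.

// A layout "atom" tagged with a 16-bit flag, i.e. it expects a 16-bit element type.
using FlaggedLayoutSketch =
    decltype(cute::composition(cute::Swizzle<3,3,3>{},
                               cute::smem_ptr_flag_bits<16>{},
                               cute::Layout<cute::Shape<cute::_8, cute::_64>,
                                            cute::Stride<cute::_64, cute::_1>>{}));

__global__ void bind_flagged_layout_sketch()
{
  __shared__ cutlass::half_t smem[8 * 64];

  // make_tensor() above checks 16 == sizeof_bits<half_t>::value and returns a tensor whose
  // iterator is a swizzled shared-memory pointer in place of the unset flag.
  auto sA = cute::make_tensor(cute::make_smem_ptr(smem), FlaggedLayoutSketch{});
  (void) sA;
}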
cutlass/include/cute/pointer_flagged.hpp/0
{ "file_path": "cutlass/include/cute/pointer_flagged.hpp", "repo_id": "cutlass", "token_count": 1960 }
16
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Architecture-specific operators on memory added for SM75 */ #pragma once #include "cutlass/array.h" #include "cutlass/layout/matrix.h" #include "cute/arch/copy_sm75.hpp" #include "cute/arch/util.hpp" namespace cutlass { namespace arch { ///////////////////////////////////////////////////////////////////////////////////////////////// template < /// Layout of destination matrix (column-major implies transpose) typename Layout, /// .x1, .x2, or .x4 int MatrixCount > inline __device__ void ldsm(Array<unsigned, MatrixCount> & D, void const* ptr); ///////////////////////////////////////////////////////////////////////////////////////////////// // // Determine the appropriate way to target PTX's "ldmatrix" instruction. 
// ///////////////////////////////////////////////////////////////////////////////////////////////// /// CUTLASS helper to get SMEM pointer inline __device__ unsigned cutlass_get_smem_pointer(void *ptr) { return cute::cast_smem_ptr_to_uint(ptr); } /// CUTLASS helper to get SMEM pointer inline __device__ unsigned cutlass_get_smem_pointer(void const *ptr) { return cutlass_get_smem_pointer(const_cast<void *>(ptr)); } ///////////////////////////////////////////////////////////////////////////////////////////////// template <> inline __device__ void ldsm<layout::RowMajor, 1>( Array<unsigned, 1> & D, void const* ptr) { #if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED) unsigned addr = cutlass_get_smem_pointer(ptr); int x; asm volatile ("ldmatrix.sync.aligned.x1.m8n8.shared.b16 {%0}, [%1];" : "=r"(x) : "r"(addr)); reinterpret_cast<int &>(D) = x; #else CUTLASS_UNUSED(D); CUTLASS_UNUSED(ptr); CUTLASS_NOT_IMPLEMENTED(); #endif } ///////////////////////////////////////////////////////////////////////////////////////////////// template <> inline __device__ void ldsm<layout::RowMajor, 2>( Array<unsigned, 2> & D, void const* ptr) { #if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED) unsigned addr = cutlass_get_smem_pointer(ptr); int x, y; asm volatile ("ldmatrix.sync.aligned.x2.m8n8.shared.b16 {%0, %1}, [%2];" : "=r"(x), "=r"(y) : "r"(addr)); reinterpret_cast<int2 &>(D) = make_int2(x, y); #else CUTLASS_UNUSED(D); CUTLASS_UNUSED(ptr); CUTLASS_NOT_IMPLEMENTED(); #endif } ///////////////////////////////////////////////////////////////////////////////////////////////// template <> inline __device__ void ldsm<layout::RowMajor, 4>( Array<unsigned, 4> & D, void const* ptr) { #if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED) unsigned addr = cutlass_get_smem_pointer(ptr); int x, y, z, w; asm volatile ("ldmatrix.sync.aligned.x4.m8n8.shared.b16 {%0, %1, %2, %3}, [%4];" : "=r"(x), "=r"(y), "=r"(z), "=r"(w) : "r"(addr)); reinterpret_cast<int4 &>(D) = make_int4(x, y, z, w); #else CUTLASS_UNUSED(D); CUTLASS_UNUSED(ptr); CUTLASS_NOT_IMPLEMENTED(); #endif } ///////////////////////////////////////////////////////////////////////////////////////////////// // // Transpose on 16b granularity // ///////////////////////////////////////////////////////////////////////////////////////////////// template <> inline __device__ void ldsm<layout::ColumnMajor, 1>( Array<unsigned, 1> & D, void const* ptr) { #if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED) unsigned addr = cutlass_get_smem_pointer(ptr); int x; asm volatile ("ldmatrix.sync.aligned.x1.trans.m8n8.shared.b16 {%0}, [%1];" : "=r"(x) : "r"(addr)); reinterpret_cast<int &>(D) = x; #else CUTLASS_UNUSED(D); CUTLASS_UNUSED(ptr); CUTLASS_NOT_IMPLEMENTED(); #endif } ///////////////////////////////////////////////////////////////////////////////////////////////// template <> inline __device__ void ldsm<layout::ColumnMajor, 2>( Array<unsigned, 2> & D, void const* ptr) { #if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED) unsigned addr = cutlass_get_smem_pointer(ptr); int x, y; asm volatile ("ldmatrix.sync.aligned.x2.trans.m8n8.shared.b16 {%0, %1}, [%2];" : "=r"(x), "=r"(y) : "r"(addr)); reinterpret_cast<int2 &>(D) = make_int2(x, y); #else CUTLASS_UNUSED(D); CUTLASS_UNUSED(ptr); CUTLASS_NOT_IMPLEMENTED(); #endif } ///////////////////////////////////////////////////////////////////////////////////////////////// template <> inline __device__ void ldsm<layout::ColumnMajor, 4>( Array<unsigned, 4> & D, void const* ptr) { #if defined(CUTE_ARCH_LDSM_SM75_ACTIVATED) unsigned addr = cutlass_get_smem_pointer(ptr); int x, y, z, w; asm 
volatile ("ldmatrix.sync.aligned.x4.trans.m8n8.shared.b16 {%0, %1, %2, %3}, [%4];" : "=r"(x), "=r"(y), "=r"(z), "=r"(w) : "r"(addr)); reinterpret_cast<int4 &>(D) = make_int4(x, y, z, w); #else CUTLASS_UNUSED(D); CUTLASS_UNUSED(ptr); CUTLASS_NOT_IMPLEMENTED(); #endif } ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename AccessType, int Bytes> struct shared_load_op { CUTLASS_DEVICE shared_load_op(AccessType &D, void const *ptr) { D = *reinterpret_cast<AccessType const *>(ptr); } }; template <typename AccessType> CUTLASS_DEVICE void shared_load(AccessType &D, void const *ptr) { shared_load_op<AccessType, int(sizeof(AccessType))>(D, ptr); } ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename AccessType> struct shared_load_op<AccessType, 16> { CUTLASS_DEVICE shared_load_op(AccessType &D, void const *ptr) { unsigned addr = cutlass_get_smem_pointer(ptr); uint4 v; asm volatile ("ld.shared.v4.b32 {%0, %1, %2, %3}, [%4];" : "=r"(v.x), "=r"(v.y), "=r"(v.z), "=r"(v.w) : "r"(addr)); D = reinterpret_cast<AccessType const &>(v); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename AccessType> struct shared_load_op<AccessType, 8> { CUTLASS_DEVICE shared_load_op(AccessType &D, void const *ptr) { unsigned addr = cutlass_get_smem_pointer(ptr); uint2 v; asm volatile ("ld.shared.v2.b32 {%0, %1}, [%2];" : "=r"(v.x), "=r"(v.y) : "r"(addr)); D = reinterpret_cast<AccessType const &>(v); } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace arch } // namespace cutlass
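// -----------------------------------------------------------------------------------------------
// [Illustrative sketch appended by the editor -- not part of the original header.]
// One possible way to drive the ldsm<>() helpers above from a kernel. The kernel name, the
// 32x8 shared-memory staging tile and the fill pattern are hypothetical; only the ldsm call
// itself reflects the API defined in this file. Assumes a full warp executes the kernel.
// -----------------------------------------------------------------------------------------------
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/layout/matrix.h"

__global__ void ldsm_x4_sketch()
{
  // 32 rows of 8 half_t values = four 8x8 matrices of 16-bit elements.
  __shared__ cutlass::half_t smem[32 * 8];

  int lane = threadIdx.x % 32;

  // Stage something into shared memory first (illustrative fill).
  for (int i = lane; i < 32 * 8; i += 32) {
    smem[i] = cutlass::half_t(float(i));
  }
  __syncthreads();

  // ldmatrix.x4: each lane contributes the address of one 16-byte row.
  void const *row_ptr = &smem[lane * 8];

  cutlass::Array<unsigned, 4> frag;
  cutlass::arch::ldsm<cutlass::layout::RowMajor, 4>(frag, row_ptr);

  // frag now holds four 32-bit registers whose half_t contents are distributed
  // across the warp in the ldmatrix fragment layout.
  (void) frag;
}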
cutlass/include/cutlass/arch/memory_sm75.h/0
{ "file_path": "cutlass/include/cutlass/arch/memory_sm75.h", "repo_id": "cutlass", "token_count": 2780 }
17
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #pragma once #include "cutlass/layout/tensor.h" #include "cutlass/arch/mma.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/dispatch_policy.hpp" #include "cutlass/detail/layout.hpp" #include "cutlass/gemm/collective/builders/sm90_common.inl" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass::conv::collective::detail { ///////////////////////////////////////////////////////////////////////////////////////////////// // Maps a rank-1 cute::Shape<> representing the cluster shape on to the IM2COL TMA atom that should be used with it template <class UnimodalClusterShape> constexpr auto sm90_cluster_shape_to_im2col_tma_atom(UnimodalClusterShape unimodal_cluster_shape) { static_assert(cute::rank(unimodal_cluster_shape) == 1, "Use this function to figure out TMA for each mode individually."); if constexpr (cute::size(unimodal_cluster_shape) == 1) { return cute::SM90_TMA_LOAD_IM2COL{}; } else { return cute::SM90_TMA_LOAD_IM2COL_MULTICAST{}; } } // Collective tile traits struct that serves as a type list containing a tensor's mem layouts and atoms for the template< class GmemTiledCopy_, class SmemLayout_, class SmemCopyAtom_ = void > struct Sm90ImplicitGemmTileTraits { using GmemTiledCopy = GmemTiledCopy_; using SmemLayout = SmemLayout_; using SmemCopyAtom = SmemCopyAtom_; }; // Accepts a cutlass::layout::Tensor tag and computes the corresponding spatial dimension count template <class GmemLayoutTagA, class GmemLayoutTagB> constexpr int gmem_layout_tags_to_spatial_dims() { static_assert(cute::is_same_v<GmemLayoutTagA, GmemLayoutTagB>); if constexpr (cute::is_same_v<GmemLayoutTagA, cutlass::layout::TensorNWC>) { return 1; } else if constexpr 
(cute::is_same_v<GmemLayoutTagA, cutlass::layout::TensorNHWC>) { return 2; } else if constexpr (cute::is_same_v<GmemLayoutTagA, cutlass::layout::TensorNDHWC>) { return 3; } else { static_assert(cutlass::detail::dependent_false<GmemLayoutTagA>); } } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass::conv::collective::detail /////////////////////////////////////////////////////////////////////////////////////////////////
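// -----------------------------------------------------------------------------------------------
// [Illustrative sketch appended by the editor -- not part of the original file.]
// Compile-time checks showing what the two helpers above evaluate to. They assume the
// definitions in this file (and the cute TMA copy atoms they reference) are already visible;
// the example namespace is hypothetical.
// -----------------------------------------------------------------------------------------------
namespace example_sm90_common_sketch {

using cutlass::conv::collective::detail::gmem_layout_tags_to_spatial_dims;
using cutlass::conv::collective::detail::sm90_cluster_shape_to_im2col_tma_atom;

// NWC / NHWC / NDHWC activation layouts imply 1-D / 2-D / 3-D convolutions respectively.
static_assert(gmem_layout_tags_to_spatial_dims<cutlass::layout::TensorNHWC,
                                               cutlass::layout::TensorNHWC>() == 2, "");

// A cluster extent of 1 along a mode selects the plain IM2COL TMA load;
// any larger extent selects the multicast flavor.
using AtomSingle    = decltype(sm90_cluster_shape_to_im2col_tma_atom(cute::Int<1>{}));
using AtomMulticast = decltype(sm90_cluster_shape_to_im2col_tma_atom(cute::Int<2>{}));

static_assert(cute::is_same_v<AtomSingle,    cute::SM90_TMA_LOAD_IM2COL>, "");
static_assert(cute::is_same_v<AtomMulticast, cute::SM90_TMA_LOAD_IM2COL_MULTICAST>, "");

} // namespace example_sm90_common_sketch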
cutlass/include/cutlass/conv/collective/builders/sm90_common.inl/0
{ "file_path": "cutlass/include/cutlass/conv/collective/builders/sm90_common.inl", "repo_id": "cutlass", "token_count": 1192 }
18
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Default kernel-level implicit GEMM convolution definitions for threadblock-scoped epilogue. 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/gemm/threadblock/default_mma.h" #include "cutlass/gemm/threadblock/threadblock_swizzle.h" #include "cutlass/conv/threadblock/threadblock_swizzle.h" #include "cutlass/epilogue/threadblock/default_epilogue_simt.h" #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h" #include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" #include "cutlass/epilogue/threadblock/default_epilogue_with_broadcast.h" #include "cutlass/epilogue/threadblock/default_epilogue_with_reduction.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/threadblock/conv2d_tile_iterator.h" #include "cutlass/conv/threadblock/implicit_gemm_pipelined.h" #include "cutlass/conv/threadblock/implicit_gemm_multistage.h" #include "cutlass/conv/threadblock/implicit_gemm_fprop_fusion_multistage.h" #include "cutlass/conv/threadblock/implicit_gemm_wgrad_fusion_multistage.h" #include "cutlass/conv/kernel/implicit_gemm_convolution.h" #include "cutlass/conv/kernel/implicit_gemm_convolution_fusion.h" #include "cutlass/conv/kernel/implicit_gemm_convolution_strided_dgrad.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// namespace detail { template < typename ArchTag, typename Shape, typename WarpMmaTensorOp, int PartitionsK, typename OutputOp > struct DefaultConvEpilogue { using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOp< Shape, WarpMmaTensorOp, PartitionsK, OutputOp, OutputOp::kCount >::Epilogue; }; template < typename Shape, typename WarpMmaTensorOp, int PartitionsK, typename OutputOp > struct DefaultConvEpilogue< arch::Sm70, Shape, WarpMmaTensorOp, PartitionsK, OutputOp > { using Epilogue = typename epilogue::threadblock::DefaultEpilogueVoltaTensorOp< Shape, WarpMmaTensorOp, PartitionsK, OutputOp, OutputOp::kCount >::Epilogue; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename ArchTag, typename Shape, typename WarpMmaSimt, typename ElementOutput, typename ElementTensor, typename ElementVector, typename OutputOp, int ElementsPerAccess > struct DefaultConvEpilogueWithBroadcastSimt { using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithBroadcastSimt< Shape, WarpMmaSimt, ElementOutput, ElementTensor, ElementVector, OutputOp, ElementsPerAccess >::Epilogue; }; template < typename ArchTag, typename Shape, typename WarpMmaSimt, typename ElementOutput, typename ElementTensor, typename ElementVector, typename OutputOp, int ElementsPerAccess > struct DefaultConvEpilogueWithBroadcastSimtStridedDgrad { using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithBroadcastSimtStridedDgrad< Shape, WarpMmaSimt, ElementOutput, ElementTensor, ElementVector, OutputOp, ElementsPerAccess >::Epilogue; }; template < typename ArchTag, typename Shape, typename WarpMmaTensorOp, int PartitionsK, typename ElementOutput, typename ElementTensor, typename ElementVector, typename OutputOp, int ElementsPerAccess > struct DefaultConvEpilogueWithBroadcastTensorOp { using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithBroadcastTensorOp< Shape, WarpMmaTensorOp, PartitionsK, ElementOutput, ElementTensor, ElementVector, OutputOp, ElementsPerAccess >::Epilogue; }; template < typename Shape, typename WarpMmaTensorOp, int PartitionsK, typename 
ElementOutput, typename ElementTensor, typename ElementVector, typename OutputOp, int ElementsPerAccess > struct DefaultConvEpilogueWithBroadcastTensorOp< arch::Sm70, Shape, WarpMmaTensorOp, PartitionsK, ElementOutput, ElementTensor, ElementVector, OutputOp, ElementsPerAccess > { using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithBroadcastVoltaTensorOp< Shape, WarpMmaTensorOp, PartitionsK, ElementOutput, ElementTensor, ElementVector, OutputOp, ElementsPerAccess >::Epilogue; }; ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename ArchTag, typename Shape, typename WarpMmaTensorOp, int PartitionsK, typename ElementOutput, typename OutputOp, typename ReductionOp, int ElementsPerAccess > struct DefaultConvEpilogueWithReductionTensorOp { using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithReductionTensorOp< Shape, WarpMmaTensorOp, PartitionsK, ElementOutput, OutputOp, ReductionOp, ElementsPerAccess >::Epilogue; }; template < typename Shape, typename WarpMmaTensorOp, int PartitionsK, typename ElementOutput, typename OutputOp, typename ReductionOp, int ElementsPerAccess > struct DefaultConvEpilogueWithReductionTensorOp< arch::Sm70, Shape, WarpMmaTensorOp, PartitionsK, ElementOutput, OutputOp, ReductionOp, ElementsPerAccess > { using Epilogue = typename epilogue::threadblock::DefaultEpilogueWithReductionVoltaTensorOp< Shape, WarpMmaTensorOp, PartitionsK, ElementOutput, OutputOp, ReductionOp, ElementsPerAccess >::Epilogue; }; ///////////////////////////////////////////////////////////////////////////////////////////////// // Defaults for strided Dgrad template < typename ArchTag, typename Shape, typename WarpMmaTensorOp, int PartitionsK, typename OutputOp > struct DefaultConvEpilogueStridedDgrad { using Epilogue = typename epilogue::threadblock::DefaultEpilogueTensorOpStridedDgrad< Shape, WarpMmaTensorOp, PartitionsK, OutputOp, OutputOp::kCount >::Epilogue; }; template < typename Shape, typename WarpMmaTensorOp, int PartitionsK, typename OutputOp > struct DefaultConvEpilogueStridedDgrad< arch::Sm70, Shape, WarpMmaTensorOp, PartitionsK, OutputOp > { using Epilogue = typename epilogue::threadblock::DefaultEpilogueVoltaTensorOpStridedDgrad< Shape, WarpMmaTensorOp, PartitionsK, OutputOp, OutputOp::kCount >::Epilogue; }; } // namespace detail ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
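// -----------------------------------------------------------------------------------------------
// [Illustrative sketch appended by the editor -- not part of the original header.]
// All of the traits above rely on the same idiom: a primary template provides the Tensor Op
// epilogue, and a partial specialization on arch::Sm70 swaps in the Volta-specific epilogue,
// whose accumulator fragment layout differs. A stripped-down, hypothetical analog of that
// dispatch pattern (names are illustrative only):
// -----------------------------------------------------------------------------------------------
#include "cutlass/arch/arch.h"

namespace example_epilogue_dispatch_sketch {

template <typename ArchTag>
struct SelectEpilogueKind {
  // Default path: epilogue tailored to mma.sync Tensor Core accumulators (SM75 and newer).
  static constexpr char const *kind = "TensorOp";
};

template <>
struct SelectEpilogueKind<cutlass::arch::Sm70> {
  // Volta path: different accumulator layout, hence a dedicated epilogue.
  static constexpr char const *kind = "VoltaTensorOp";
};

// Usage: the kernel-level default picks the epilogue purely from the architecture tag.
using Sm80Kind = SelectEpilogueKind<cutlass::arch::Sm80>;
using Sm70Kind = SelectEpilogueKind<cutlass::arch::Sm70>;

} // namespace example_epilogue_dispatch_sketch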
cutlass/include/cutlass/conv/kernel/default_conv2d.h/0
{ "file_path": "cutlass/include/cutlass/conv/kernel/default_conv2d.h", "repo_id": "cutlass", "token_count": 2756 }
19
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Templates implementing loading of convolution tiles mapped to GEMM A (output gradient tile) matrix from memory. This iterator assumes TensorNDHWC layout of tensors in Global Memory. The iterator is specialized for each of the three convolution operators: forward propagation (Fprop), backward data gradient (Dgrad), and backward weight gradient (Wgrad). 
*/ #pragma once #include "cutlass/cutlass.h" #include "cutlass/array.h" #include "cutlass/coord.h" #include "cutlass/predicate_vector.h" #include "cutlass/tensor_ref.h" #include "cutlass/tensor_view.h" #include "cutlass/layout/pitch_linear.h" #include "cutlass/layout/tensor.h" #include "cutlass/layout/matrix.h" #include "cutlass/conv/convolution.h" #include "cutlass/conv/conv3d_problem_size.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace conv { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// template < typename Shape_, typename Element_, typename ThreadMap_, conv::StrideSupport StrideSupport_ = conv::StrideSupport::kStrided > class Conv3dDgradOutputGradientTileAccessIteratorAnalytic; ///////////////////////////////////////////////////////////////////////////////////////////////// // Conv3dDgradOutputGradientTileAccessIteratorAnalytic strided dgrad needs special handling using // unscaled coordinates template < typename Shape_, typename Element_, typename ThreadMap_ > class Conv3dDgradOutputGradientTileAccessIteratorAnalytic < Shape_, Element_, ThreadMap_, conv::StrideSupport::kStrided > { public: // // Types // using Shape = Shape_; using Element = Element_; using Layout = layout::TensorNDHWC; using ThreadMap = ThreadMap_; using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>; using TensorRef = cutlass::TensorRef<Element, Layout>; using TensorCoord = typename Layout::TensorCoord; using Index = typename Layout::Index; using LongIndex = typename Layout::LongIndex; static IteratorAlgorithm const kIteratorAlgorithm = conv::IteratorAlgorithm::kAnalytic; static StrideSupport const kStrideSupport = conv::StrideSupport::kStrided; static int const kConvDim = 3; using ConvProblemSize = typename conv::Conv3dProblemSize; static int const kAccessesPerVector = 1; static_assert(sizeof_bits<Element>::value >= 8, "DGRAD requires elements of size 8b or greater."); // // Simplifying assertions // static_assert(ThreadMap::Iterations::kContiguous == 1, "Require Iterations::kContiguous == 1"); // // Parameters structure // struct Params { Layout layout; // // Methods // CUTLASS_HOST_DEVICE Params() { } CUTLASS_HOST_DEVICE Params( ConvProblemSize const &problem_size, Layout const &layout ): layout(layout) { } }; private: Params const &params_; ConvProblemSize const &problem_size_; LongIndex iteration_contiguous_; LongIndex iteration_strided_; char const *pointer_; int filter_k_; int filter_t_; int filter_r_; int filter_s_; int offset_n_[ThreadMap::Iterations::kStrided]; int offset_d_[ThreadMap::Iterations::kStrided]; int offset_w_[ThreadMap::Iterations::kStrided]; int offset_h_[ThreadMap::Iterations::kStrided]; private: /// Returns the coordinate in the output tensor Dy that is currently pointed to /// by the iterator but DOES NOT scale by the convolution stride. This is needed /// to compute predicates in the valid() method. The return value of the public at() /// method is correctly scaled.
CUTLASS_HOST_DEVICE TensorCoord unscaled_at_() const { int n = offset_n_[iteration_strided_]; int d = offset_d_[iteration_strided_]; int h = offset_h_[iteration_strided_]; int w = offset_w_[iteration_strided_]; int t = filter_t_; int r = filter_r_; int s = filter_s_; if (problem_size_.mode == Mode::kConvolution) { t = (problem_size_.T - 1 - t); r = (problem_size_.R - 1 - r); s = (problem_size_.S - 1 - s); } int z = (d + problem_size_.pad_d - t * problem_size_.dilation_d); int p = (h + problem_size_.pad_h - r * problem_size_.dilation_h); int q = (w + problem_size_.pad_w - s * problem_size_.dilation_w); return TensorCoord(n, z, p, q, filter_k_); } public: CUTLASS_HOST_DEVICE Conv3dDgradOutputGradientTileAccessIteratorAnalytic( Params const &params, ConvProblemSize const &problem_size, Element const *ptr, int thread_idx, MatrixCoord const &threadblock_offset = MatrixCoord() // threadblock offset - units are whole CTA tiles ): params_(params), problem_size_(problem_size), pointer_(reinterpret_cast<char const *>(ptr)), filter_k_(0), filter_t_(0), filter_r_(0), filter_s_(0) { layout::PitchLinearCoord thread_coord = ThreadMap::initial_offset(thread_idx); filter_k_ = threadblock_offset.column() + thread_coord.contiguous(); CUTLASS_PRAGMA_UNROLL for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { int offset_ndhw = threadblock_offset.row() + thread_coord.strided() + s * ThreadMap::Delta::kStrided; offset_n_[s] = offset_ndhw / (problem_size_.D * problem_size_.H * problem_size_.W); int residual = offset_ndhw % (problem_size_.D * problem_size_.H * problem_size_.W); offset_d_[s] = residual / (problem_size_.H * problem_size_.W); residual = residual % (problem_size_.H * problem_size_.W); offset_h_[s] = residual / problem_size_.W; offset_w_[s] = residual % problem_size_.W; } } CUTLASS_HOST_DEVICE static Params getParams(Conv3dProblemSize const &problem_size, Layout const &layout) { return Params(problem_size, layout); } /// Overrides the internal iteration index CUTLASS_HOST_DEVICE void set_iteration_index(Index index) { iteration_contiguous_ = index % ThreadMap::Iterations::kContiguous; iteration_strided_ = index / ThreadMap::Iterations::kContiguous; } /// Adds a pointer offset in units of Element CUTLASS_HOST_DEVICE void add_pointer_offset(LongIndex pointer_offset) { pointer_ += pointer_offset * sizeof_bits<Element>::value / 8; } CUTLASS_HOST_DEVICE void advance() { // move to the next tile ++filter_s_; if (filter_s_ < problem_size_.S) { return; } filter_s_ = 0; ++filter_r_; if (filter_r_ < problem_size_.R) { return; } filter_r_ = 0; ++filter_t_; if (filter_t_ < problem_size_.T) { return; } filter_t_ = 0; filter_k_ += Shape_::kColumn * problem_size_.split_k_slices; } /// Returns the coordinate in the output tensor Dy that is currently pointed to /// by the iterator. 
CUTLASS_HOST_DEVICE TensorCoord at() const { TensorCoord coord = unscaled_at_(); return TensorCoord( coord.n(), coord.d() / problem_size_.stride_d, coord.h() / problem_size_.stride_h, coord.w() / problem_size_.stride_w, coord.c()); } /// Returns true if the current coordinate is within the output tensor Dy CUTLASS_HOST_DEVICE bool valid() const { TensorCoord unscaled_coord = unscaled_at_(); TensorCoord coord = at(); return !(unscaled_coord.d() % problem_size_.stride_d) && !(unscaled_coord.h() % problem_size_.stride_h) && !(unscaled_coord.w() % problem_size_.stride_w) && coord.n() < problem_size_.N && coord.d() >= 0 && coord.d() < problem_size_.Z && coord.h() >= 0 && coord.h() < problem_size_.P && coord.w() >= 0 && coord.w() < problem_size_.Q && coord.c() < problem_size_.K; } /// Returns a pointer to the vector starting at the current coordinate CUTLASS_HOST_DEVICE AccessType const *get() const { TensorCoord coord = at(); LongIndex offset = params_.layout(coord); return reinterpret_cast<AccessType const *>(pointer_ + offset * sizeof_bits<Element>::value / 8); } /// Increments to the next memory access CUTLASS_HOST_DEVICE Conv3dDgradOutputGradientTileAccessIteratorAnalytic &operator++() { ++iteration_contiguous_; if (iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { return *this; } iteration_contiguous_ = 0; ++iteration_strided_; if (iteration_strided_ < ThreadMap::Iterations::kStrided) { return *this; } iteration_strided_ = 0; return *this; } /// Determines whether the Implicit GEMM can execute the given problem. CUTLASS_HOST_DEVICE static Status can_implement(ConvProblemSize const &problem_size) { // check alignment constraint on iterator's contiguous dimension if (problem_size.K % (128/sizeof_bits<Element>::value)) { return Status::kErrorInvalidProblem; } return Status::kSuccess; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace conv } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
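// -----------------------------------------------------------------------------------------------
// [Illustrative sketch appended by the editor -- not part of the original header.]
// A host-side walk-through of the index arithmetic that unscaled_at_(), at() and valid()
// perform for strided dgrad, shown for the depth dimension only and for cross-correlation
// mode (kConvolution additionally mirrors the filter tap). All numeric values are examples.
// -----------------------------------------------------------------------------------------------
#include <cstdio>

// Returns true when filter tap t contributes to input position d, and writes the
// output-gradient coordinate z that would be read in that case.
inline bool dgrad_output_coord_sketch(int d, int t,
                                      int pad_d, int dilation_d, int stride_d, int Z,
                                      int &z_out)
{
  int unscaled = d + pad_d - t * dilation_d;     // what unscaled_at_() computes
  if (unscaled % stride_d != 0) {                // divisibility predicate used by valid()
    return false;
  }
  z_out = unscaled / stride_d;                   // what at() returns after scaling
  return z_out >= 0 && z_out < Z;                // bounds predicate used by valid()
}

int main()
{
  int z = 0;
  // d=5, t=1, pad_d=1, dilation_d=1, stride_d=2, Z=4 -> unscaled=5, not divisible: skipped.
  std::printf("contributes=%d\n", int(dgrad_output_coord_sketch(5, 1, 1, 1, 2, 4, z)));
  // d=6, t=1, pad_d=1, dilation_d=1, stride_d=2, Z=4 -> unscaled=6, z=3: valid read.
  bool ok = dgrad_output_coord_sketch(6, 1, 1, 1, 2, 4, z);
  std::printf("contributes=%d z=%d\n", int(ok), z);
  return 0;
}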
cutlass/include/cutlass/conv/threadblock/conv3d_dgrad_output_gradient_tile_access_iterator_analytic.h/0
{ "file_path": "cutlass/include/cutlass/conv/threadblock/conv3d_dgrad_output_gradient_tile_access_iterator_analytic.h", "repo_id": "cutlass", "token_count": 3703 }
20
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Basic include for CUTLASS. */ #pragma once #include "cutlass/detail/helper_macros.hpp" //////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { /// Status code returned by CUTLASS operations enum class Status { kSuccess, ///< Operation was successful. kErrorMisalignedOperand, ///< operands fail alignment requirements. kErrorInvalidDataType, ///< DataType fails requirement. kErrorInvalidLayout, ///< Layout fails alignment requirement. kErrorInvalidProblem, ///< Specified problem size is not supported by operator. kErrorNotSupported, ///< Operation is not supported on current device. kErrorWorkspaceNull, ///< The given workspace is null when it is required to be non-null. kErrorInternal, ///< An error within CUTLASS occurred. kErrorArchMismatch, ///< CUTLASS runs on a device that it was not compiled for. kErrorInsufficientDriver, ///< CUTLASS runs with a driver that is too old. kErrorMemoryAllocation, ///< Kernel launch failed due to insufficient device memory. kInvalid ///< Status is unspecified. 
}; /// Convert cutlass status to status strings CUTLASS_HOST_DEVICE static char const* cutlassGetStatusString(cutlass::Status status) { switch (status) { case cutlass::Status::kSuccess: return "Success"; case cutlass::Status::kErrorMisalignedOperand: return "Error Misaligned Operand"; case cutlass::Status::kErrorInvalidDataType: return "Error Invalid Data Type"; case cutlass::Status::kErrorInvalidLayout: return "Error Invalid Layout"; case cutlass::Status::kErrorInvalidProblem: return "Error Invalid Problem"; case cutlass::Status::kErrorNotSupported: return "Error Not Supported"; case cutlass::Status::kErrorWorkspaceNull: return "Error Workspace Null"; case cutlass::Status::kErrorInternal: return "Error Internal"; case cutlass::Status::kErrorInsufficientDriver: return "Error Insufficient Driver"; case cutlass::Status::kErrorArchMismatch: return "Error Architecture Mismatch"; case cutlass::Status::kErrorMemoryAllocation: return "Error Memory Allocation failed"; case cutlass::Status::kInvalid: break; } return "Invalid status"; } //////////////////////////////////////////////////////////////////////////////////////////////////// static const int NumThreadsPerWarp = 32; static const int NumThreadsPerWarpGroup = 128; static const int NumWarpsPerWarpGroup = NumThreadsPerWarpGroup / NumThreadsPerWarp; static const int NumThreadsPerHalfWarp = NumThreadsPerWarp / 2; static const int NumThreadsPerQuad = 4; static const int NumThreadsPerQuadPair = NumThreadsPerQuad * 2; //////////////////////////////////////////////////////////////////////////////////////////////////// /// Helper function to return true when called by thread 0 of threadblock 0. CUTLASS_HOST_DEVICE bool thread0() { #if defined(__CUDA_ARCH__) return (!threadIdx.x && !threadIdx.y && !threadIdx.z) && (!blockIdx.x && !blockIdx.y && !blockIdx.z); #else return false; #endif } /// Returns a lane index in the warp. The threads in the warp may not be convergent CUTLASS_DEVICE int canonical_lane_idx() { #if defined(__CUDA_ARCH__) return threadIdx.x % NumThreadsPerWarp; #else return 0; #endif } /// Returns a warp-uniform value indicating the canonical warp index of the calling threads. /// Threads within the warp must be converged. CUTLASS_DEVICE int canonical_warp_idx_sync() { #if defined(__CUDA_ARCH__) return __shfl_sync(0xffffffff, threadIdx.x / NumThreadsPerWarp, 0); #else return 0; #endif } /// Returns a warp index in the CTA. The threads in the warp may not be convergent /// As it doesn't sync the warp, it is faster and allows forward progress CUTLASS_DEVICE int canonical_warp_idx() { #if defined(__CUDA_ARCH__) return threadIdx.x / NumThreadsPerWarp; #else return 0; #endif } /// Returns a warp-uniform value indicating the canonical warp group index of the calling threads. /// Threads within the warp must be converged. CUTLASS_DEVICE int canonical_warp_group_idx() { #if defined(__CUDA_ARCH__) return __shfl_sync(0xffffffff, threadIdx.x / NumThreadsPerWarpGroup, 0); #else return 0; #endif } //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass ////////////////////////////////////////////////////////////////////////////////////////////////////
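// -----------------------------------------------------------------------------------------------
// [Illustrative sketch appended by the editor -- not part of the original header.]
// Typical use of the pieces above. The function and kernel names are hypothetical.
// -----------------------------------------------------------------------------------------------
#include <cstdio>
#include "cutlass/cutlass.h"

// Host side: turn a Status returned by any CUTLASS operation into a log message.
inline bool report_status_sketch(cutlass::Status status)
{
  if (status != cutlass::Status::kSuccess) {
    std::printf("CUTLASS error: %s\n", cutlass::cutlassGetStatusString(status));
    return false;
  }
  return true;
}

// Device side: elect one warp (and one lane within it) to perform CTA-wide setup.
__global__ void warp_leader_sketch()
{
  if (cutlass::canonical_warp_idx_sync() == 0 && cutlass::canonical_lane_idx() == 0) {
    // e.g. initialize a shared-memory barrier exactly once per threadblock
  }
}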
cutlass/include/cutlass/cutlass.h/0
{ "file_path": "cutlass/include/cutlass/cutlass.h", "repo_id": "cutlass", "token_count": 1922 }
21
/*************************************************************************************************** * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Functor performing pipelined epilogues with bias add and elementwise activation functions. This collective is now DEPRECATED, will be removed in the next release. Use EVT instead. 
*/ #pragma once #include "sm90_epilogue_tma_warpspecialized.hpp" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace collective { ///////////////////////////////////////////////////////////////////////////////////////////////// template < int StagesC_, int StagesD_, int FragmentSize_, class BlockTileShape_, // (BLK_M,BLK_N,BLK_K) class EpilogueTileShape_, // (EPI_TILE_M,EPI_TILE_N) class ElementC_, class StrideC_, class ElementD_, class StrideD_, class FusionCallbacks_, class CopyOpG2S_, class SmemLayoutAtomC_, class CopyOpS2R_, class CopyOpS2G_, class SmemLayoutAtomD_, class CopyOpR2S_ > class Sm90EpilogueTmaWarpSpecializedBiasElementwise : public CollectiveEpilogue< Sm90TmaWarpSpecialized<StagesC_, StagesD_, FragmentSize_, false, false>, BlockTileShape_, EpilogueTileShape_, ElementC_, StrideC_, ElementD_, StrideD_, FusionCallbacks_, CopyOpG2S_, SmemLayoutAtomC_, CopyOpS2R_, CopyOpS2G_, SmemLayoutAtomD_, CopyOpR2S_ > { private: using Impl = CollectiveEpilogue< Sm90TmaWarpSpecialized<StagesC_, StagesD_, FragmentSize_, false, false>, BlockTileShape_, EpilogueTileShape_, ElementC_, StrideC_, ElementD_, StrideD_, FusionCallbacks_, CopyOpG2S_, SmemLayoutAtomC_, CopyOpS2R_, CopyOpS2G_, SmemLayoutAtomD_, CopyOpR2S_ >; public: using DispatchPolicy = Sm90TmaWarpSpecializedBiasElementwise<StagesC_, StagesD_, FragmentSize_>; using ElementCompute = typename Impl::ThreadEpilogueOp::ElementCompute; using ElementBias = typename Impl::ThreadEpilogueOp::ElementBias; using ElementT = typename Impl::ThreadEpilogueOp::ElementAux; // Constructor inheritance using Impl::Impl; // Host side epilogue arguments struct [[deprecated("use Sm90TmaWarpSpecialized Arguments instead")]] Arguments { struct ThreadArgs { ElementCompute alpha{1}; ElementCompute beta{0}; ElementCompute const *alpha_ptr{nullptr}; ElementCompute const *beta_ptr{nullptr}; } thread; ElementC_ const* ptr_C{nullptr}; StrideC_ dC{}; ElementD_* ptr_D{nullptr}; StrideD_ dD{}; ElementBias const* ptr_Bias{nullptr}; ElementT* ptr_T{nullptr}; CUTLASS_HOST_DEVICE operator typename Impl::Arguments() const { typename Impl::Arguments arguments; arguments.thread.alpha = thread.alpha; arguments.thread.beta = thread.beta; arguments.thread.alpha_ptr = thread.alpha_ptr; arguments.thread.beta_ptr = thread.beta_ptr; if constexpr (not cute::is_void_v<ElementBias>) { arguments.thread.bias_ptr = ptr_Bias; } if constexpr (not cute::is_void_v<ElementT>) { arguments.thread.aux_ptr = ptr_T; arguments.thread.dAux = dD; } arguments.ptr_C = ptr_C; arguments.dC = dC; arguments.ptr_D = ptr_D; arguments.dD = dD; return arguments; } }; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace collective } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/collective/sm90_epilogue_tma_warpspecialized_bias_elementwise.hpp/0
{ "file_path": "cutlass/include/cutlass/epilogue/collective/sm90_epilogue_tma_warpspecialized_bias_elementwise.hpp", "repo_id": "cutlass", "token_count": 1858 }
22
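The deprecated Arguments struct above exists so that older call sites keep compiling while the implementation forwards to the newer EVT-based epilogue. The standalone sketch below uses hypothetical NewArguments/LegacyArguments types (not CUTLASS API) to illustrate the same pattern: a [[deprecated]] struct whose implicit conversion operator remaps legacy fields onto the replacement layout.

// Standalone illustration with hypothetical types; not part of CUTLASS.
#include <cstdio>

// Stand-in for the newer, EVT-style argument struct.
struct NewArguments {
  float alpha = 1.f;
  float beta  = 0.f;
  float const *bias_ptr = nullptr;
};

// Stand-in for the deprecated struct; the conversion operator remaps old fields to new ones.
struct [[deprecated("use NewArguments instead")]] LegacyArguments {
  struct ThreadArgs {
    float alpha = 1.f;
    float beta  = 0.f;
  } thread;
  float const *ptr_Bias = nullptr;

  operator NewArguments() const {
    NewArguments args;
    args.alpha    = thread.alpha;
    args.beta     = thread.beta;
    args.bias_ptr = ptr_Bias;
    return args;
  }
};

int main() {
  LegacyArguments legacy;      // emits a deprecation warning at the call site, as intended
  legacy.thread.alpha = 2.f;

  NewArguments args = legacy;  // implicit conversion performs the field remapping
  std::printf("alpha = %.1f, beta = %.1f\n", args.alpha, args.beta);
  return 0;
}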
/***************************************************************************************************
 * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/
/*! \file
  \brief Functor performing reduction operations used by epilogues.
*/

#pragma once

#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"

/////////////////////////////////////////////////////////////////////////////////////////////////

namespace cutlass {
namespace epilogue {
namespace thread {

/////////////////////////////////////////////////////////////////////////////////////////////////

/// Applies a reduction sum to an array of elements.
///
///
template <
  typename Element_,  ///< Data type used to load and store tensors
  int Count           ///< Number of elements computed per operation
>
class ReductionOpPlus {
public:

  using Element = Element_;
  static int const kCount = Count;

  using Fragment = Array<Element, kCount>;
  using Operator = plus<Fragment>;

  /// Host-constructable parameters structure
  struct Params { };

private:

  /// reduction operator
  Operator operator_;

public:

  /// Constructs the function object, possibly loading from pointers in host memory
  CUTLASS_HOST_DEVICE
  ReductionOpPlus(Params const &params) {

  }

  /// Computes the elementwise sum of two fragments
  CUTLASS_HOST_DEVICE
  Fragment operator()(
    Fragment const &lhs,
    Fragment const &rhs) const {

    return operator_(lhs, rhs);
  }
};

/////////////////////////////////////////////////////////////////////////////////////////////////

} // namespace thread
} // namespace epilogue
} // namespace cutlass
cutlass/include/cutlass/epilogue/thread/reduction_op.h/0
{ "file_path": "cutlass/include/cutlass/epilogue/thread/reduction_op.h", "repo_id": "cutlass", "token_count": 925 }
23
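ReductionOpPlus is normally invoked by an epilogue on device, but every operation it uses is CUTLASS_HOST_DEVICE, so it can be exercised directly on the host. The sketch below is illustrative only; the element type, vector width, and test values are arbitrary choices.

// Host-side sketch exercising ReductionOpPlus outside any epilogue.
#include <cstdio>
#include "cutlass/array.h"
#include "cutlass/epilogue/thread/reduction_op.h"

int main() {
  using ReductionOp = cutlass::epilogue::thread::ReductionOpPlus<float, 4>;

  ReductionOp::Params params;   // no state to configure for a plain sum
  ReductionOp reduce(params);

  ReductionOp::Fragment lhs, rhs;
  for (int i = 0; i < ReductionOp::kCount; ++i) {
    lhs[i] = float(i);          // 0, 1, 2, 3
    rhs[i] = 10.f;              // constant partial sum
  }

  // Elementwise sum of the two fragments: expected 10, 11, 12, 13.
  ReductionOp::Fragment sum = reduce(lhs, rhs);
  for (int i = 0; i < ReductionOp::kCount; ++i) {
    std::printf("sum[%d] = %f\n", i, float(sum[i]));
  }
  return 0;
}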
/*************************************************************************************************** * Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! 
\file \brief */ #pragma once #include "predicated_tile_iterator.h" #include "cutlass/gemm/gemm.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace epilogue { namespace threadblock { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines the optimal thread map for TensorOp accumulator layouts template < typename ThreadblockShape, typename WarpShape, int PartitionsK, typename ElementOutput, int ElementsPerAccess, typename ElementAccumulator > struct DefaultThreadMapVoltaTensorOp; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines the optimal thread map for TensorOp accumulator layouts template < typename ThreadblockShape_, typename WarpShape_, int PartitionsK, typename ElementOutput_, int ElementsPerAccess > struct DefaultThreadMapVoltaTensorOp< ThreadblockShape_, WarpShape_, PartitionsK, ElementOutput_, ElementsPerAccess, half_t> { using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; static int const kPartitionsK = PartitionsK; using ElementOutput = ElementOutput_; static int const kElementsPerAccess = ElementsPerAccess; using ElementAccumulator = half_t; // // Definitions // struct Detail { static int const kTensorOpRows = 16; static int const kWarpSize = 32; static int const kInterleavedTilesM = WarpShape::kM / 32; static_assert( !(ThreadblockShape::kM % WarpShape::kM) && !(ThreadblockShape::kN % WarpShape::kN), "Divisibility"); /// Number of warps using WarpCount = gemm::GemmShape< ThreadblockShape::kM / WarpShape::kM, ThreadblockShape::kN / WarpShape::kN, kPartitionsK >; /// Number of participating threads static int const kThreads = WarpCount::kCount * kWarpSize; using Shape = cutlass::epilogue::threadblock::OutputTileShape< ThreadblockShape::kN, // column 4, // row 4, // group WarpCount::kM, // cluster 1 // tile >; /// Number of iterations per subspace using Count = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 2, // row kInterleavedTilesM, // group 1, // cluster WarpShape::kM / kTensorOpRows // iterations >; }; // // ThreadMap // /// ThreadMap to be used by epilogue::PredicatedTileIterator satisfying concept OutputTileThreadMap using Type = OutputTileOptimalThreadMap < typename Detail::Shape, typename Detail::Count, Detail::kThreads, kElementsPerAccess, sizeof_bits<ElementOutput>::value >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Defines the optimal thread map for TensorOp accumulator layouts template < typename ThreadblockShape_, typename WarpShape_, int PartitionsK, typename ElementOutput_, int ElementsPerAccess > struct DefaultThreadMapVoltaTensorOp< ThreadblockShape_, WarpShape_, PartitionsK, ElementOutput_, ElementsPerAccess, float> { using ThreadblockShape = ThreadblockShape_; using WarpShape = WarpShape_; static int const kPartitionsK = PartitionsK; using ElementOutput = ElementOutput_; static int const kElementsPerAccess = ElementsPerAccess; using ElementAccumulator = float; // // Definitions // struct Detail { static int const kTensorOpRows = 16; static int const kWarpSize = 32; static int const kInterleavedTilesM = WarpShape::kM / 32; static_assert( !(ThreadblockShape::kM % WarpShape::kM) && !(ThreadblockShape::kN % WarpShape::kN), "Divisibility"); /// Number of warps using WarpCount = gemm::GemmShape< ThreadblockShape::kM / WarpShape::kM, ThreadblockShape::kN / WarpShape::kN, 
kPartitionsK >; /// Number of participating threads static int const kThreads = WarpCount::kCount * kWarpSize; using Shape = cutlass::epilogue::threadblock::OutputTileShape< ThreadblockShape::kN, // column 4, // row 4, // group WarpCount::kM, // cluster 1 // tile >; /// Number of iterations per subspace using Count = cutlass::epilogue::threadblock::OutputTileShape< 1, // column 2, // row kInterleavedTilesM, // group 1, // cluster WarpShape::kM / kTensorOpRows // iterations >; }; // // ThreadMap // /// ThreadMap to be used by epilogue::PredicatedTileIterator satisfying concept OutputTileThreadMap using Type = OutputTileOptimalThreadMap < typename Detail::Shape, typename Detail::Count, Detail::kThreads, kElementsPerAccess, sizeof_bits<ElementOutput>::value >; }; ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace threadblock } // namespace epilogue } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
cutlass/include/cutlass/epilogue/threadblock/default_thread_map_volta_tensor_op.h/0
{ "file_path": "cutlass/include/cutlass/epilogue/threadblock/default_thread_map_volta_tensor_op.h", "repo_id": "cutlass", "token_count": 2469 }
24
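A minimal sketch of instantiating the half_t specialization above follows. The 128x128x32 threadblock tile, 64x64x32 warp tile, and access width of eight elements are assumptions chosen to satisfy the divisibility static_assert, not tuned configurations.

// Illustrative sketch only -- the tile shapes below are assumptions, not recommendations.
#include <cstdio>
#include "cutlass/epilogue/threadblock/default_thread_map_volta_tensor_op.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/numeric_types.h"

int main() {
  using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>;
  using WarpShape        = cutlass::gemm::GemmShape<64, 64, 32>;

  // Passing half_t as the last argument selects the half_t accumulator specialization above.
  using DefaultMap = cutlass::epilogue::threadblock::DefaultThreadMapVoltaTensorOp<
      ThreadblockShape,
      WarpShape,
      1,                 // PartitionsK
      cutlass::half_t,   // ElementOutput
      8,                 // ElementsPerAccess (128 bits of half_t)
      cutlass::half_t>;  // ElementAccumulator

  // An epilogue would consume DefaultMap::Type as its OutputTileThreadMap.
  using ThreadMap = DefaultMap::Type;
  static_assert(sizeof(ThreadMap) >= 0, "instantiation check only");

  int warps   = DefaultMap::Detail::WarpCount::kCount;
  int threads = DefaultMap::Detail::kThreads;
  std::printf("warps per threadblock: %d\n", warps);    // expected 4 for these shapes
  std::printf("participating threads: %d\n", threads);  // expected 128
  return 0;
}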