/***************************************************************************************************
 * Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/
#pragma once

#include <cute/config.hpp>
#include <cute/arch/mma.hpp>

// Config
// Note: guard reconstructed from the macro name used in the error messages below;
// the upstream header may additionally check the CUDA toolkit version.
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
#  define CUTE_ARCH_MMA_SM80_ENABLED
#endif
namespace cute { | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x8 TN | |
struct SM80_16x8x8_F16F16F16F16_TN | |
{ | |
using DRegisters = uint32_t[2]; | |
using ARegisters = uint32_t[2]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[2]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, | |
uint32_t const& a0, uint32_t const& a1, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16 " | |
"{%0, %1}," | |
"{%2, %3}," | |
"{%4}," | |
"{%5, %6};\n" | |
: "=r"(d0), "=r"(d1) | |
: "r"(a0), "r"(a1), | |
"r"(b0), | |
"r"(c0), "r"(c1)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x8_F16F16F16F16_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
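// Usage sketch (illustrative note, not part of the upstream header): each of the
// 32 threads of a warp supplies its fragment of the 16x8x8 tile through packed
// 32-bit registers -- two halves per uint32_t -- matching the D/A/B/C register
// counts declared above. Assuming CUTE_ARCH_MMA_SM80_ENABLED, a direct call from
// device code looks like:
//
//   uint32_t a[2], b[1], c[2] = {0u, 0u}, d[2];
//   // ... populate a[] and b[] with this thread's A/B fragments ...
//   SM80_16x8x8_F16F16F16F16_TN::fma(d[0], d[1], a[0], a[1], b[0], c[0], c[1]);
//
// In practice these ops are rarely called directly; they are wrapped in a
// cute::MMA_Atom, which pairs them with the matching per-thread fragment layouts.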
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x16 TN | |
struct SM80_16x8x16_F16F16F16F16_TN | |
{ | |
using DRegisters = uint32_t[2]; | |
using ARegisters = uint32_t[4]; | |
using BRegisters = uint32_t[2]; | |
using CRegisters = uint32_t[2]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, | |
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, | |
uint32_t const& b0, uint32_t const& b1, | |
uint32_t const& c0, uint32_t const& c1) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16 " | |
"{%0, %1}," | |
"{%2, %3, %4, %5}," | |
"{%6, %7}," | |
"{%8, %9};\n" | |
: "=r"(d0), "=r"(d1) | |
: "r"(a0), "r"(a1), "r"(a2), "r"(a3), | |
"r"(b0), "r"(b1), | |
"r"(c0), "r"(c1)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_F16F16F16F16_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x8 TN | |
struct SM80_16x8x8_F32F16F16F32_TN | |
{ | |
using DRegisters = float[4]; | |
using ARegisters = uint32_t[2]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = float[4]; | |
CUTE_HOST_DEVICE static void | |
fma(float & d0, float & d1, float & d2, float & d3, | |
uint32_t const& a0, uint32_t const& a1, | |
uint32_t const& b0, | |
float const & c0, float const & c1, float const & c2, float const & c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 " | |
"{%0, %1, %2, %3}," | |
"{%4, %5}," | |
"{%6}," | |
"{%7, %8, %9, %10};\n" | |
: "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3) | |
: "r"(a0), "r"(a1), | |
"r"(b0), | |
"f"(c0), "f"(c1), "f"(c2), "f"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x8_F32F16F16F32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x16 TN | |
struct SM80_16x8x16_F32F16F16F32_TN | |
{ | |
using DRegisters = float[4]; | |
using ARegisters = uint32_t[4]; | |
using BRegisters = uint32_t[2]; | |
using CRegisters = float[4]; | |
CUTE_HOST_DEVICE static void | |
fma(float & d0, float & d1, float & d2, float & d3, | |
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, | |
uint32_t const& b0, uint32_t const& b1, | |
float const & c0, float const & c1, float const & c2, float const & c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 " | |
"{%0, %1, %2, %3}," | |
"{%4, %5, %6, %7}," | |
"{%8, %9}," | |
"{%10, %11, %12, %13};\n" | |
: "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3) | |
: "r"(a0), "r"(a1), "r"(a2), "r"(a3), | |
"r"(b0), "r"(b1), | |
"f"(c0), "f"(c1), "f"(c2), "f"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_F32F16F16F32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x8 TN | |
struct SM80_16x8x8_F32BF16BF16F32_TN | |
{ | |
using DRegisters = float[4]; | |
using ARegisters = uint32_t[2]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = float[4]; | |
CUTE_HOST_DEVICE static void | |
fma(float & d0, float & d1, float & d2, float & d3, | |
uint32_t const& a0, uint32_t const& a1, | |
uint32_t const& b0, | |
float const & c0, float const & c1, float const & c2, float const & c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k8.row.col.f32.bf16.bf16.f32 " | |
"{%0, %1, %2, %3}," | |
"{%4, %5}," | |
"{%6}," | |
"{%7, %8, %9, %10};\n" | |
: "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3) | |
: "r"(a0), "r"(a1), | |
"r"(b0), | |
"f"(c0), "f"(c1), "f"(c2), "f"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x8_F32BF16BF16F32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x16 TN | |
struct SM80_16x8x16_F32BF16BF16F32_TN | |
{ | |
using DRegisters = float[4]; | |
using ARegisters = uint32_t[4]; | |
using BRegisters = uint32_t[2]; | |
using CRegisters = float[4]; | |
CUTE_HOST_DEVICE static void | |
fma(float & d0, float & d1, float & d2, float & d3, | |
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, | |
uint32_t const& b0, uint32_t const& b1, | |
float const & c0, float const & c1, float const & c2, float const & c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k16.row.col.f32.bf16.bf16.f32 " | |
"{%0, %1, %2, %3}," | |
"{%4, %5, %6, %7}," | |
"{%8, %9}," | |
"{%10, %11, %12, %13};\n" | |
: "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3) | |
: "r"(a0), "r"(a1), "r"(a2), "r"(a3), | |
"r"(b0), "r"(b1), | |
"f"(c0), "f"(c1), "f"(c2), "f"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_F32BF16BF16F32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x4 TN | |
struct SM80_16x8x4_F32TF32TF32F32_TN | |
{ | |
using DRegisters = float[4]; | |
using ARegisters = uint32_t[2]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = float[4]; | |
CUTE_HOST_DEVICE static void | |
fma(float & d0, float & d1, float & d2, float & d3, | |
uint32_t const& a0, uint32_t const& a1, | |
uint32_t const& b0, | |
float const & c0, float const & c1, float const & c2, float const & c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k4.row.col.f32.tf32.tf32.f32 " | |
"{%0, %1, %2, %3}," | |
"{%4, %5}," | |
"{%6}," | |
"{%7, %8, %9, %10};\n" | |
: "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3) | |
: "r"(a0), "r"(a1), | |
"r"(b0), | |
"f"(c0), "f"(c1), "f"(c2), "f"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x4_F32TF32TF32F32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x8 TN | |
struct SM80_16x8x8_F32TF32TF32F32_TN | |
{ | |
using DRegisters = float[4]; | |
using ARegisters = uint32_t[4]; | |
using BRegisters = uint32_t[2]; | |
using CRegisters = float[4]; | |
CUTE_HOST_DEVICE static void | |
fma(float & d0, float & d1, float & d2, float & d3, | |
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, | |
uint32_t const& b0, uint32_t const& b1, | |
float const & c0, float const & c1, float const & c2, float const & c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32 " | |
"{%0, %1, %2, %3}," | |
"{%4, %5, %6, %7}," | |
"{%8, %9}," | |
"{%10, %11, %12, %13};\n" | |
: "=f"(d0), "=f"(d1), "=f"(d2), "=f"(d3) | |
: "r"(a0), "r"(a1), "r"(a2), "r"(a3), | |
"r"(b0), "r"(b1), | |
"f"(c0), "f"(c1), "f"(c2), "f"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x8_F32TF32TF32F32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
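// Note on the TF32 variants above (explanatory, not part of the upstream header):
// the A and B operands travel in 32-bit registers holding tf32-encoded values,
// i.e. floats with the mantissa reduced to 10 bits. Inputs are typically
// converted beforehand (for example with the PTX instruction cvt.rna.tf32.f32),
// while the accumulator and result stay full-precision f32.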
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 8x8x4 TN | |
struct SM80_8x8x4_F64F64F64F64_TN | |
{ | |
using DRegisters = double[2]; | |
using ARegisters = double[1]; | |
using BRegisters = double[1]; | |
using CRegisters = double[2]; | |
CUTE_HOST_DEVICE static void | |
fma(double & d0, double & d1, | |
double const& a0, | |
double const& b0, | |
double const& c0, double const& c1) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m8n8k4.row.col.f64.f64.f64.f64 " | |
"{%0, %1}," | |
"{%2}," | |
"{%3}," | |
"{%4, %5};\n" | |
: "=d"(d0), "=d"(d1) | |
: "d"(a0), | |
"d"(b0), | |
"d"(c0), "d"(c1)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x4_F64F64F64F64_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
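// Note (explanatory, not part of the upstream header): with an 8x8x4 tile and 32
// threads per warp, each thread holds exactly one double of A (8*4/32), one
// double of B (4*8/32) and two doubles of C/D (8*8/32), which is why the register
// typedefs above are double[1]/double[1]/double[2]. The two complex f64 ops below
// are built by composing calls to this real-valued op.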
// MMA 8x8x4 TN with Planar Complex multiplication | |
struct SM80_8x8x4_C64C64C64C64_TN | |
{ | |
using DRegisters = complex<double>[2]; | |
using ARegisters = complex<double>[1]; | |
using BRegisters = complex<double>[1]; | |
using CRegisters = complex<double>[2]; | |
CUTE_HOST_DEVICE static void | |
fma(complex<double> & d0, complex<double> & d1, | |
complex<double> const& a0, | |
complex<double> const& b0, | |
complex<double> const& c0, complex<double> const& c1) | |
{ | |
// Because thrust::complex does not provide mutable references to its real and imaginary parts
double& rd0 = reinterpret_cast<double(&)[2]>(d0)[0]; | |
double& id0 = reinterpret_cast<double(&)[2]>(d0)[1]; | |
double& rd1 = reinterpret_cast<double(&)[2]>(d1)[0]; | |
double& id1 = reinterpret_cast<double(&)[2]>(d1)[1]; | |
// d.real() = a.real() * b.real() + c.real(); | |
SM80_8x8x4_F64F64F64F64_TN::fma( | |
rd0, rd1, | |
a0.real(), | |
b0.real(), | |
c0.real(), c1.real()); | |
// d.imag() = a.imag() * b.real() + c.imag(); | |
SM80_8x8x4_F64F64F64F64_TN::fma( | |
id0, id1, | |
a0.imag(), | |
b0.real(), | |
c0.imag(), c1.imag()); | |
// d.real() = -a.imag() * b.imag() + d.real(); | |
SM80_8x8x4_F64F64F64F64_TN::fma( | |
rd0, rd1, | |
-a0.imag(), | |
b0.imag(), | |
d0.real(), d1.real()); | |
// d.imag() = a.real() * b.imag() + d.imag(); | |
SM80_8x8x4_F64F64F64F64_TN::fma( | |
id0, id1, | |
a0.real(), | |
b0.imag(), | |
d0.imag(), d1.imag()); | |
} | |
}; | |
// MMA 8x8x4 TN with Gaussian Complex multiplication: | |
// (a + bi)*(c + di) | |
// yields | |
// t0 += a*c | |
// t1 += b*d | |
// t2 += (a+b)*(c+d) | |
// then | |
// re = t0 - t1 | |
// im = t2 - t0 - t1 | |
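// Check: with t0 = a*c, t1 = b*d and t2 = (a+b)*(c+d) = a*c + a*d + b*c + b*d,
// t0 - t1 = a*c - b*d (the real part) and t2 - t0 - t1 = a*d + b*c (the
// imaginary part), so three real MMAs replace the four used by the
// planar-complex op above.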
struct SM80_8x8x4_GC64C64C64GC64_TN | |
{ | |
struct GaussComplex { | |
double t0, t1, t2; | |
CUTE_HOST_DEVICE //constexpr | |
operator complex<double>() const { return complex<double>(t0 - t1, t2 - t0 - t1); } | |
CUTE_HOST_DEVICE friend //constexpr | |
complex<double> operator*(GaussComplex const& a, complex<double> const& b) { return static_cast<complex<double>>(a) * b; } | |
CUTE_HOST_DEVICE friend //constexpr | |
complex<double> operator*(complex<double> const& a, GaussComplex const& b) { return b * a; } | |
CUTE_HOST_DEVICE friend //constexpr | |
complex<double> operator+(GaussComplex const& a, complex<double> const& b) { return static_cast<complex<double>>(a) + b; } | |
CUTE_HOST_DEVICE friend //constexpr | |
complex<double> operator+(complex<double> const& a, GaussComplex const& b) { return b + a; } | |
}; | |
using DRegisters = GaussComplex[2]; | |
using ARegisters = complex<double>[1]; | |
using BRegisters = complex<double>[1]; | |
using CRegisters = GaussComplex[2]; | |
CUTE_HOST_DEVICE static void | |
fma(GaussComplex & d0, GaussComplex & d1, | |
complex<double> const& a0, | |
complex<double> const& b0, | |
GaussComplex const& c0, GaussComplex const& c1) | |
{ | |
SM80_8x8x4_F64F64F64F64_TN::fma(d0.t0, d1.t0, | |
a0.real(), | |
b0.real(), | |
c0.t0, c1.t0); | |
SM80_8x8x4_F64F64F64F64_TN::fma(d0.t1, d1.t1, | |
a0.imag(), | |
b0.imag(), | |
c0.t1, c1.t1); | |
SM80_8x8x4_F64F64F64F64_TN::fma(d0.t2, d1.t2, | |
a0.real() + a0.imag(), | |
b0.real() + b0.imag(), | |
c0.t2, c1.t2); | |
} | |
}; | |
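// Usage note (explanatory, not part of the upstream header): the accumulator is
// kept in the three-term GaussComplex form across the K loop; the final value is
// recovered through the conversion operator above, e.g.
//
//   complex<double> r0 = static_cast<complex<double>>(d0);
//
// which evaluates t0 - t1 and t2 - t0 - t1 once per output element.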
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 8x8x16 TN | |
struct SM80_8x8x16_S32S8S8S32_TN | |
{ | |
using DRegisters = uint32_t[2]; | |
using ARegisters = uint32_t[1]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[2]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, | |
uint32_t const& a0, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32 " | |
"{%0, %1}," | |
"{%2}," | |
"{%3}," | |
"{%4, %5};\n" | |
: "=r"(d0), "=r"(d1) | |
: "r"(a0), | |
"r"(b0), | |
"r"(c0), "r"(c1)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32S8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 8x8x16 TN | |
struct SM80_8x8x16_S32S8S8S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[2]; | |
using ARegisters = uint32_t[1]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[2]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, | |
uint32_t const& a0, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32.satfinite " | |
"{%0, %1}," | |
"{%2}," | |
"{%3}," | |
"{%4, %5};\n" | |
: "=r"(d0), "=r"(d1) | |
: "r"(a0), | |
"r"(b0), | |
"r"(c0), "r"(c1)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32S8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
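// Note (explanatory, not part of the upstream header): for the 8-bit integer ops
// each uint32_t of A and B packs four consecutive s8/u8 values along K, and the
// accumulator is s32. The _SATURATE variants emit ".satfinite", which clamps the
// accumulated result to the s32 range instead of wrapping on overflow.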
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x16 TN | |
struct SM80_16x8x16_S32S8S8S32_TN | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[2]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.s8.s8.s32 " | |
"{%0, %1, %2, %3}," | |
"{%4, %5}," | |
"{%6}," | |
"{%7, %8, %9, %10};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), | |
"r"(b0), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32S8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x16 TN | |
struct SM80_16x8x16_S32S8S8S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[2]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.s8.s8.s32.satfinite " | |
"{%0, %1, %2, %3}," | |
"{%4, %5}," | |
"{%6}," | |
"{%7, %8, %9, %10};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), | |
"r"(b0), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32S8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x32 TN | |
struct SM80_16x8x32_S32S8S8S32_TN | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[4]; | |
using BRegisters = uint32_t[2]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, | |
uint32_t const& b0, uint32_t const& b1, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.s8.s8.s32 " | |
"{%0, %1, %2, %3}," | |
"{%4, %5, %6, %7}," | |
"{%8, %9}," | |
"{%10, %11, %12, %13};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), "r"(a2), "r"(a3), | |
"r"(b0), "r"(b1), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x32 TN | |
struct SM80_16x8x32_S32S8S8S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[4]; | |
using BRegisters = uint32_t[2]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, | |
uint32_t const& b0, uint32_t const& b1, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.s8.s8.s32.satfinite " | |
"{%0, %1, %2, %3}," | |
"{%4, %5, %6, %7}," | |
"{%8, %9}," | |
"{%10, %11, %12, %13};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), "r"(a2), "r"(a3), | |
"r"(b0), "r"(b1), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 8x8x16 TN | |
struct SM80_8x8x16_S32S8U8S32_TN | |
{ | |
using DRegisters = uint32_t[2]; | |
using ARegisters = uint32_t[1]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[2]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, | |
uint32_t const& a0, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m8n8k16.row.col.s32.s8.u8.s32 " | |
"{%0, %1}," | |
"{%2}," | |
"{%3}," | |
"{%4, %5};\n" | |
: "=r"(d0), "=r"(d1) | |
: "r"(a0), | |
"r"(b0), | |
"r"(c0), "r"(c1)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32S8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 8x8x16 TN | |
struct SM80_8x8x16_S32S8U8S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[2]; | |
using ARegisters = uint32_t[1]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[2]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, | |
uint32_t const& a0, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m8n8k16.row.col.s32.s8.u8.s32.satfinite " | |
"{%0, %1}," | |
"{%2}," | |
"{%3}," | |
"{%4, %5};\n" | |
: "=r"(d0), "=r"(d1) | |
: "r"(a0), | |
"r"(b0), | |
"r"(c0), "r"(c1)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32S8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x16 TN | |
struct SM80_16x8x16_S32S8U8S32_TN | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[2]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.s8.u8.s32 " | |
"{%0, %1, %2, %3}," | |
"{%4, %5}," | |
"{%6}," | |
"{%7, %8, %9, %10};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), | |
"r"(b0), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32S8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x16 TN | |
struct SM80_16x8x16_S32S8U8S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[2]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.s8.u8.s32.satfinite " | |
"{%0, %1, %2, %3}," | |
"{%4, %5}," | |
"{%6}," | |
"{%7, %8, %9, %10};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), | |
"r"(b0), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32S8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x32 TN | |
struct SM80_16x8x32_S32S8U8S32_TN | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[4]; | |
using BRegisters = uint32_t[2]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, | |
uint32_t const& b0, uint32_t const& b1, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.s8.u8.s32 " | |
"{%0, %1, %2, %3}," | |
"{%4, %5, %6, %7}," | |
"{%8, %9}," | |
"{%10, %11, %12, %13};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), "r"(a2), "r"(a3), | |
"r"(b0), "r"(b1), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x32 TN | |
struct SM80_16x8x32_S32S8U8S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[4]; | |
using BRegisters = uint32_t[2]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, | |
uint32_t const& b0, uint32_t const& b1, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.s8.u8.s32.satfinite " | |
"{%0, %1, %2, %3}," | |
"{%4, %5, %6, %7}," | |
"{%8, %9}," | |
"{%10, %11, %12, %13};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), "r"(a2), "r"(a3), | |
"r"(b0), "r"(b1), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 8x8x16 TN | |
struct SM80_8x8x16_S32U8S8S32_TN | |
{ | |
using DRegisters = uint32_t[2]; | |
using ARegisters = uint32_t[1]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[2]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, | |
uint32_t const& a0, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m8n8k16.row.col.s32.u8.s8.s32 " | |
"{%0, %1}," | |
"{%2}," | |
"{%3}," | |
"{%4, %5};\n" | |
: "=r"(d0), "=r"(d1) | |
: "r"(a0), | |
"r"(b0), | |
"r"(c0), "r"(c1)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32U8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 8x8x16 TN | |
struct SM80_8x8x16_S32U8S8S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[2]; | |
using ARegisters = uint32_t[1]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[2]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, | |
uint32_t const& a0, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m8n8k16.row.col.s32.u8.s8.s32.satfinite " | |
"{%0, %1}," | |
"{%2}," | |
"{%3}," | |
"{%4, %5};\n" | |
: "=r"(d0), "=r"(d1) | |
: "r"(a0), | |
"r"(b0), | |
"r"(c0), "r"(c1)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32U8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x16 TN | |
struct SM80_16x8x16_S32U8S8S32_TN | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[2]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.u8.s8.s32 " | |
"{%0, %1, %2, %3}," | |
"{%4, %5}," | |
"{%6}," | |
"{%7, %8, %9, %10};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), | |
"r"(b0), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32U8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x16 TN | |
struct SM80_16x8x16_S32U8S8S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[2]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.u8.s8.s32.satfinite " | |
"{%0, %1, %2, %3}," | |
"{%4, %5}," | |
"{%6}," | |
"{%7, %8, %9, %10};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), | |
"r"(b0), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32U8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x32 TN | |
struct SM80_16x8x32_S32U8S8S32_TN | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[4]; | |
using BRegisters = uint32_t[2]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, | |
uint32_t const& b0, uint32_t const& b1, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.u8.s8.s32 " | |
"{%0, %1, %2, %3}," | |
"{%4, %5, %6, %7}," | |
"{%8, %9}," | |
"{%10, %11, %12, %13};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), "r"(a2), "r"(a3), | |
"r"(b0), "r"(b1), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U8S8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x32 TN | |
struct SM80_16x8x32_S32U8S8S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[4]; | |
using BRegisters = uint32_t[2]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, | |
uint32_t const& b0, uint32_t const& b1, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.u8.s8.s32.satfinite " | |
"{%0, %1, %2, %3}," | |
"{%4, %5, %6, %7}," | |
"{%8, %9}," | |
"{%10, %11, %12, %13};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), "r"(a2), "r"(a3), | |
"r"(b0), "r"(b1), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U8S8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 8x8x16 TN | |
struct SM80_8x8x16_S32U8U8S32_TN | |
{ | |
using DRegisters = uint32_t[2]; | |
using ARegisters = uint32_t[1]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[2]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, | |
uint32_t const& a0, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m8n8k16.row.col.s32.u8.u8.s32 " | |
"{%0, %1}," | |
"{%2}," | |
"{%3}," | |
"{%4, %5};\n" | |
: "=r"(d0), "=r"(d1) | |
: "r"(a0), | |
"r"(b0), | |
"r"(c0), "r"(c1)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32U8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 8x8x16 TN | |
struct SM80_8x8x16_S32U8U8S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[2]; | |
using ARegisters = uint32_t[1]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[2]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, | |
uint32_t const& a0, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m8n8k16.row.col.s32.u8.u8.s32.satfinite " | |
"{%0, %1}," | |
"{%2}," | |
"{%3}," | |
"{%4, %5};\n" | |
: "=r"(d0), "=r"(d1) | |
: "r"(a0), | |
"r"(b0), | |
"r"(c0), "r"(c1)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x16_S32U8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x16 TN | |
struct SM80_16x8x16_S32U8U8S32_TN | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[2]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.u8.u8.s32 " | |
"{%0, %1, %2, %3}," | |
"{%4, %5}," | |
"{%6}," | |
"{%7, %8, %9, %10};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), | |
"r"(b0), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32U8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x16 TN | |
struct SM80_16x8x16_S32U8U8S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[2]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k16.row.col.s32.u8.u8.s32.satfinite " | |
"{%0, %1, %2, %3}," | |
"{%4, %5}," | |
"{%6}," | |
"{%7, %8, %9, %10};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), | |
"r"(b0), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x16_S32U8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x32 TN | |
struct SM80_16x8x32_S32U8U8S32_TN | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[4]; | |
using BRegisters = uint32_t[2]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, | |
uint32_t const& b0, uint32_t const& b1, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.u8.u8.s32 " | |
"{%0, %1, %2, %3}," | |
"{%4, %5, %6, %7}," | |
"{%8, %9}," | |
"{%10, %11, %12, %13};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), "r"(a2), "r"(a3), | |
"r"(b0), "r"(b1), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U8U8S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x32 TN | |
struct SM80_16x8x32_S32U8U8S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[4]; | |
using BRegisters = uint32_t[2]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, | |
uint32_t const& b0, uint32_t const& b1, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.u8.u8.s32.satfinite " | |
"{%0, %1, %2, %3}," | |
"{%4, %5, %6, %7}," | |
"{%8, %9}," | |
"{%10, %11, %12, %13};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), "r"(a2), "r"(a3), | |
"r"(b0), "r"(b1), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U8U8S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 8x8x32 TN | |
struct SM80_8x8x32_S32S4S4S32_TN | |
{ | |
using DRegisters = uint32_t[2]; | |
using ARegisters = uint32_t[1]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[2]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, | |
uint32_t const& a0, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m8n8k32.row.col.s32.s4.s4.s32 " | |
"{%0, %1}," | |
"{%2}," | |
"{%3}," | |
"{%4, %5};\n" | |
: "=r"(d0), "=r"(d1) | |
: "r"(a0), | |
"r"(b0), | |
"r"(c0), "r"(c1)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32S4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 8x8x32 TN | |
struct SM80_8x8x32_S32S4S4S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[2]; | |
using ARegisters = uint32_t[1]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[2]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, | |
uint32_t const& a0, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m8n8k32.row.col.s32.s4.s4.s32.satfinite " | |
"{%0, %1}," | |
"{%2}," | |
"{%3}," | |
"{%4, %5};\n" | |
: "=r"(d0), "=r"(d1) | |
: "r"(a0), | |
"r"(b0), | |
"r"(c0), "r"(c1)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32S4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
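// Note (explanatory, not part of the upstream header): for the 4-bit integer ops
// each uint32_t of A and B packs eight consecutive s4/u4 values along K, which is
// why the K extent per instruction grows to 32 or 64 while the register counts
// match the 8-bit shapes above.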
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x32 TN | |
struct SM80_16x8x32_S32S4S4S32_TN | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[2]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.s4.s4.s32 " | |
"{%0, %1, %2, %3}," | |
"{%4, %5}," | |
"{%6}," | |
"{%7, %8, %9, %10};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), | |
"r"(b0), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x32 TN | |
struct SM80_16x8x32_S32S4S4S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[2]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.s4.s4.s32.satfinite " | |
"{%0, %1, %2, %3}," | |
"{%4, %5}," | |
"{%6}," | |
"{%7, %8, %9, %10};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), | |
"r"(b0), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x64 TN | |
struct SM80_16x8x64_S32S4S4S32_TN | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[4]; | |
using BRegisters = uint32_t[2]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, | |
uint32_t const& b0, uint32_t const& b1, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.s4.s4.s32 " | |
"{%0, %1, %2, %3}," | |
"{%4, %5, %6, %7}," | |
"{%8, %9}," | |
"{%10, %11, %12, %13};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), "r"(a2), "r"(a3), | |
"r"(b0), "r"(b1), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32S4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x64 TN | |
struct SM80_16x8x64_S32S4S4S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[4]; | |
using BRegisters = uint32_t[2]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, | |
uint32_t const& b0, uint32_t const& b1, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.s4.s4.s32.satfinite " | |
"{%0, %1, %2, %3}," | |
"{%4, %5, %6, %7}," | |
"{%8, %9}," | |
"{%10, %11, %12, %13};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), "r"(a2), "r"(a3), | |
"r"(b0), "r"(b1), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32S4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 8x8x32 TN | |
struct SM80_8x8x32_S32S4U4S32_TN | |
{ | |
using DRegisters = uint32_t[2]; | |
using ARegisters = uint32_t[1]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[2]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, | |
uint32_t const& a0, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m8n8k32.row.col.s32.s4.u4.s32 " | |
"{%0, %1}," | |
"{%2}," | |
"{%3}," | |
"{%4, %5};\n" | |
: "=r"(d0), "=r"(d1) | |
: "r"(a0), | |
"r"(b0), | |
"r"(c0), "r"(c1)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32S4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 8x8x32 TN | |
struct SM80_8x8x32_S32S4U4S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[2]; | |
using ARegisters = uint32_t[1]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[2]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, | |
uint32_t const& a0, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m8n8k32.row.col.s32.s4.u4.s32.satfinite " | |
"{%0, %1}," | |
"{%2}," | |
"{%3}," | |
"{%4, %5};\n" | |
: "=r"(d0), "=r"(d1) | |
: "r"(a0), | |
"r"(b0), | |
"r"(c0), "r"(c1)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32S4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x32 TN | |
struct SM80_16x8x32_S32S4U4S32_TN | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[2]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.s4.u4.s32 " | |
"{%0, %1, %2, %3}," | |
"{%4, %5}," | |
"{%6}," | |
"{%7, %8, %9, %10};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), | |
"r"(b0), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x32 TN | |
struct SM80_16x8x32_S32S4U4S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[2]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.s4.u4.s32.satfinite " | |
"{%0, %1, %2, %3}," | |
"{%4, %5}," | |
"{%6}," | |
"{%7, %8, %9, %10};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), | |
"r"(b0), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32S4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x64 TN | |
struct SM80_16x8x64_S32S4U4S32_TN | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[4]; | |
using BRegisters = uint32_t[2]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, | |
uint32_t const& b0, uint32_t const& b1, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.s4.u4.s32 " | |
"{%0, %1, %2, %3}," | |
"{%4, %5, %6, %7}," | |
"{%8, %9}," | |
"{%10, %11, %12, %13};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), "r"(a2), "r"(a3), | |
"r"(b0), "r"(b1), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32S4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x64 TN | |
struct SM80_16x8x64_S32S4U4S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[4]; | |
using BRegisters = uint32_t[2]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, | |
uint32_t const& b0, uint32_t const& b1, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.s4.u4.s32.satfinite " | |
"{%0, %1, %2, %3}," | |
"{%4, %5, %6, %7}," | |
"{%8, %9}," | |
"{%10, %11, %12, %13};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), "r"(a2), "r"(a3), | |
"r"(b0), "r"(b1), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32S4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 8x8x32 TN | |
struct SM80_8x8x32_S32U4S4S32_TN | |
{ | |
using DRegisters = uint32_t[2]; | |
using ARegisters = uint32_t[1]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[2]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, | |
uint32_t const& a0, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m8n8k32.row.col.s32.u4.s4.s32 " | |
"{%0, %1}," | |
"{%2}," | |
"{%3}," | |
"{%4, %5};\n" | |
: "=r"(d0), "=r"(d1) | |
: "r"(a0), | |
"r"(b0), | |
"r"(c0), "r"(c1)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32U4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 8x8x32 TN | |
struct SM80_8x8x32_S32U4S4S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[2]; | |
using ARegisters = uint32_t[1]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[2]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, | |
uint32_t const& a0, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m8n8k32.row.col.s32.u4.s4.s32.satfinite " | |
"{%0, %1}," | |
"{%2}," | |
"{%3}," | |
"{%4, %5};\n" | |
: "=r"(d0), "=r"(d1) | |
: "r"(a0), | |
"r"(b0), | |
"r"(c0), "r"(c1)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32U4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x32 TN | |
struct SM80_16x8x32_S32U4S4S32_TN | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[2]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.u4.s4.s32 " | |
"{%0, %1, %2, %3}," | |
"{%4, %5}," | |
"{%6}," | |
"{%7, %8, %9, %10};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), | |
"r"(b0), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x32 TN | |
struct SM80_16x8x32_S32U4S4S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[2]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.u4.s4.s32.satfinite " | |
"{%0, %1, %2, %3}," | |
"{%4, %5}," | |
"{%6}," | |
"{%7, %8, %9, %10};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), | |
"r"(b0), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x64 TN | |
struct SM80_16x8x64_S32U4S4S32_TN | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[4]; | |
using BRegisters = uint32_t[2]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, | |
uint32_t const& b0, uint32_t const& b1, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.u4.s4.s32 " | |
"{%0, %1, %2, %3}," | |
"{%4, %5, %6, %7}," | |
"{%8, %9}," | |
"{%10, %11, %12, %13};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), "r"(a2), "r"(a3), | |
"r"(b0), "r"(b1), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32U4S4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x64 TN | |
struct SM80_16x8x64_S32U4S4S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[4]; | |
using BRegisters = uint32_t[2]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, | |
uint32_t const& b0, uint32_t const& b1, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.u4.s4.s32.satfinite " | |
"{%0, %1, %2, %3}," | |
"{%4, %5, %6, %7}," | |
"{%8, %9}," | |
"{%10, %11, %12, %13};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), "r"(a2), "r"(a3), | |
"r"(b0), "r"(b1), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32U4S4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 8x8x32 TN | |
struct SM80_8x8x32_S32U4U4S32_TN | |
{ | |
using DRegisters = uint32_t[2]; | |
using ARegisters = uint32_t[1]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[2]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, | |
uint32_t const& a0, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m8n8k32.row.col.s32.u4.u4.s32 " | |
"{%0, %1}," | |
"{%2}," | |
"{%3}," | |
"{%4, %5};\n" | |
: "=r"(d0), "=r"(d1) | |
: "r"(a0), | |
"r"(b0), | |
"r"(c0), "r"(c1)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32U4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 8x8x32 TN | |
struct SM80_8x8x32_S32U4U4S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[2]; | |
using ARegisters = uint32_t[1]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[2]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, | |
uint32_t const& a0, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m8n8k32.row.col.s32.u4.u4.s32.satfinite " | |
"{%0, %1}," | |
"{%2}," | |
"{%3}," | |
"{%4, %5};\n" | |
: "=r"(d0), "=r"(d1) | |
: "r"(a0), | |
"r"(b0), | |
"r"(c0), "r"(c1)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x32_S32U4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x32 TN | |
struct SM80_16x8x32_S32U4U4S32_TN | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[2]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.u4.u4.s32 " | |
"{%0, %1, %2, %3}," | |
"{%4, %5}," | |
"{%6}," | |
"{%7, %8, %9, %10};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), | |
"r"(b0), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x32 TN | |
struct SM80_16x8x32_S32U4U4S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[2]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k32.row.col.s32.u4.u4.s32.satfinite " | |
"{%0, %1, %2, %3}," | |
"{%4, %5}," | |
"{%6}," | |
"{%7, %8, %9, %10};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), | |
"r"(b0), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x32_S32U4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x64 TN | |
struct SM80_16x8x64_S32U4U4S32_TN | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[4]; | |
using BRegisters = uint32_t[2]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, | |
uint32_t const& b0, uint32_t const& b1, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.u4.u4.s32 " | |
"{%0, %1, %2, %3}," | |
"{%4, %5, %6, %7}," | |
"{%8, %9}," | |
"{%10, %11, %12, %13};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), "r"(a2), "r"(a3), | |
"r"(b0), "r"(b1), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32U4U4S32_TN without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x64 TN | |
struct SM80_16x8x64_S32U4U4S32_TN_SATURATE | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[4]; | |
using BRegisters = uint32_t[2]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, | |
uint32_t const& b0, uint32_t const& b1, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k64.row.col.s32.u4.u4.s32.satfinite " | |
"{%0, %1, %2, %3}," | |
"{%4, %5, %6, %7}," | |
"{%8, %9}," | |
"{%10, %11, %12, %13};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), "r"(a2), "r"(a3), | |
"r"(b0), "r"(b1), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x64_S32U4U4S32_TN_SATURATE without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 8x8x128 TN | |
struct SM80_8x8x128_S32U1U1S32_TN_XORPOPC | |
{ | |
using DRegisters = uint32_t[2]; | |
using ARegisters = uint32_t[1]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[2]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, | |
uint32_t const& a0, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m8n8k128.row.col.s32.b1.b1.s32.xor.popc " | |
"{%0, %1}," | |
"{%2}," | |
"{%3}," | |
"{%4, %5};\n" | |
: "=r"(d0), "=r"(d1) | |
: "r"(a0), | |
"r"(b0), | |
"r"(c0), "r"(c1)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_8x8x128_S32U1U1S32_TN_XORPOPC without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
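// Note (explanatory, not part of the upstream header): the b1 ops treat each
// uint32_t as 32 one-bit elements. "xor.popc" means the elementwise product is an
// XOR and the reduction is a population count, i.e. each output accumulates
// popc(a ^ b) over the K extent (128 or 256 bits) into an s32 value.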
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x128 TN | |
struct SM80_16x8x128_S32U1U1S32_TN_XORPOPC | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[2]; | |
using BRegisters = uint32_t[1]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, | |
uint32_t const& b0, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k128.row.col.s32.b1.b1.s32.xor.popc " | |
"{%0, %1, %2, %3}," | |
"{%4, %5}," | |
"{%6}," | |
"{%7, %8, %9, %10};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), | |
"r"(b0), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x128_S32U1U1S32_TN_XORPOPC without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
// MMA 16x8x256 TN | |
struct SM80_16x8x256_S32U1U1S32_TN_XORPOPC | |
{ | |
using DRegisters = uint32_t[4]; | |
using ARegisters = uint32_t[4]; | |
using BRegisters = uint32_t[2]; | |
using CRegisters = uint32_t[4]; | |
CUTE_HOST_DEVICE static void | |
fma(uint32_t & d0, uint32_t & d1, uint32_t & d2, uint32_t & d3, | |
uint32_t const& a0, uint32_t const& a1, uint32_t const& a2, uint32_t const& a3, | |
uint32_t const& b0, uint32_t const& b1, | |
uint32_t const& c0, uint32_t const& c1, uint32_t const& c2, uint32_t const& c3) | |
{ | |
#if defined(CUTE_ARCH_MMA_SM80_ENABLED)
    asm volatile(
"mma.sync.aligned.m16n8k256.row.col.s32.b1.b1.s32.xor.popc " | |
"{%0, %1, %2, %3}," | |
"{%4, %5, %6, %7}," | |
"{%8, %9}," | |
"{%10, %11, %12, %13};\n" | |
: "=r"(d0), "=r"(d1), "=r"(d2), "=r"(d3) | |
: "r"(a0), "r"(a1), "r"(a2), "r"(a3), | |
"r"(b0), "r"(b1), | |
"r"(c0), "r"(c1), "r"(c2), "r"(c3)); | |
#else
    CUTE_INVALID_CONTROL_PATH("Attempting to use SM80_16x8x256_S32U1U1S32_TN_XORPOPC without CUTE_ARCH_MMA_SM80_ENABLED");
#endif
} | |
}; | |
//////////////////////////////////////////////////////////////////////////////////////////////////// | |
} // namespace cute | |