file_path | content | size | lang | avg_line_length | max_line_length | alphanum_fraction
---|---|---|---|---|---|---
NVIDIA/warp/warp/native/nanovdb/util/cuda/Util.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/*!
\file nanovdb/util/cuda/Util.h
\author Ken Museth
\date December 20, 2023
\brief Cuda specific utility functions
*/
#ifndef NANOVDB_UTIL_CUDA_UTIL_H_HAS_BEEN_INCLUDED
#define NANOVDB_UTIL_CUDA_UTIL_H_HAS_BEEN_INCLUDED
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <nanovdb/util/Util.h> // for stderr and NANOVDB_ASSERT
// change 1 -> 0 to only perform asserts during debug builds
#if 1 || defined(DEBUG) || defined(_DEBUG)
static inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
if (code != cudaSuccess) {
fprintf(stderr, "CUDA error %u: %s (%s:%d)\n", unsigned(code), cudaGetErrorString(code), file, line);
//fprintf(stderr, "CUDA Runtime Error: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
static inline void ptrAssert(const void* ptr, const char* msg, const char* file, int line, bool abort = true)
{
if (ptr == nullptr) {
fprintf(stderr, "NULL pointer error: %s %s %d\n", msg, file, line);
if (abort) exit(1);
} else if (uint64_t(ptr) % 32) {
fprintf(stderr, "Pointer misalignment error: %s %s %d\n", msg, file, line);
if (abort) exit(1);
}
}
#else
static inline void gpuAssert(cudaError_t, const char*, int, bool = true){}
static inline void ptrAssert(void*, const char*, const char*, int, bool = true){}
#endif
// Convenience macro for checking CUDA runtime API results; it can be wrapped
// around any runtime API call. Becomes a no-op when the asserts above are compiled out.
#define cudaCheck(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
#define checkPtr(ptr, msg) \
{ \
ptrAssert((ptr), (msg), __FILE__, __LINE__); \
}
#define cudaSync() \
{ \
cudaCheck(cudaDeviceSynchronize()); \
}
#define cudaCheckError() \
{ \
cudaCheck(cudaGetLastError()); \
}
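// Example (illustrative sketch, not part of this header): the macros above can wrap any CUDA
// runtime call. The function name, buffer size and host pointer below are made up for the example.
//
// void copyToDevice(const float* hostData, size_t count)
// {
//     float* d_data = nullptr;
//     cudaCheck(cudaMalloc((void**)&d_data, count * sizeof(float)));// reports file/line and aborts on failure
//     checkPtr(d_data, "copyToDevice: device allocation");// verifies the pointer is non-null and 32B aligned
//     cudaCheck(cudaMemcpy(d_data, hostData, count * sizeof(float), cudaMemcpyHostToDevice));
//     cudaSync();// shorthand for cudaCheck(cudaDeviceSynchronize())
//     cudaCheck(cudaFree(d_data));
// }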
namespace nanovdb {// =========================================================
namespace util{ namespace cuda {// ======================================================
//#define NANOVDB_USE_SYNC_CUDA_MALLOC
// cudaMallocAsync and cudaFreeAsync were introduced in CUDA 11.2, so for older toolkits we provide
// custom implementations below that simply map to cudaMalloc and cudaFree. If NANOVDB_USE_SYNC_CUDA_MALLOC
// is defined these synchronous implementations are used as well, which is useful in virtualized environments
// that slice up the GPU and share it between instances as vGPUs. In such environments GPU unified memory is
// usually disabled out of security considerations, and since asynchronous CUDA malloc/free depends on GPU
// unified memory, cudaMallocAsync and cudaFreeAsync cannot be used.
#if (CUDART_VERSION < 11020) || defined(NANOVDB_USE_SYNC_CUDA_MALLOC) // 11.2 introduced cudaMallocAsync and cudaFreeAsync
/// @brief Simple wrapper that calls cudaMalloc
/// @param d_ptr Device pointer to allocated device memory
/// @param size Number of bytes to allocate
/// @param dummy The stream establishing the stream ordering contract and the memory pool to allocate from (ignored)
/// @return Cuda error code
inline cudaError_t mallocAsync(void** d_ptr, size_t size, cudaStream_t){return cudaMalloc(d_ptr, size);}
/// @brief Simple wrapper that calls cudaFree
/// @param d_ptr Device pointer that will be freed
/// @param dummy The stream establishing the stream ordering promise (ignored)
/// @return Cuda error code
inline cudaError_t freeAsync(void* d_ptr, cudaStream_t){return cudaFree(d_ptr);}
#else
/// @brief Simple wrapper that calls cudaMallocAsync
/// @param d_ptr Device pointer to allocated device memory
/// @param size Number of bytes to allocate
/// @param stream The stream establishing the stream ordering contract and the memory pool to allocate from
/// @return Cuda error code
inline cudaError_t mallocAsync(void** d_ptr, size_t size, cudaStream_t stream){return cudaMallocAsync(d_ptr, size, stream);}
/// @brief Simple wrapper that calls cudaFreeAsync
/// @param d_ptr Device pointer that will be freed
/// @param stream The stream establishing the stream ordering promise
/// @return Cuda error code
inline cudaError_t freeAsync(void* d_ptr, cudaStream_t stream){return cudaFreeAsync(d_ptr, stream);}
#endif
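// Example (illustrative only): allocating and freeing device memory with the wrappers above, so the
// same code works with and without stream-ordered allocation. The size and stream are arbitrary.
//
// void* d_buf = nullptr;
// cudaCheck(mallocAsync(&d_buf, 4096, 0));// 0 = default stream; stream-ordered allocation or synchronous fallback
// // ... enqueue work that uses d_buf on the same stream ...
// cudaCheck(freeAsync(d_buf, 0));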
/// @brief Simple (naive) implementation of a unique device pointer
/// using stream ordered memory allocation and deallocation.
/// @tparam T Type of the device pointer
template <typename T>
class unique_ptr
{
T *mPtr;// pointer to stream ordered memory allocation
cudaStream_t mStream;
public:
unique_ptr(size_t count = 0, cudaStream_t stream = 0) : mPtr(nullptr), mStream(stream)
{
if (count>0) cudaCheck(mallocAsync((void**)&mPtr, count*sizeof(T), stream));
}
unique_ptr(const unique_ptr&) = delete;
unique_ptr(unique_ptr&& other) : mPtr(other.mPtr), mStream(other.mStream)
{
other.mPtr = nullptr;
}
~unique_ptr()
{
if (mPtr) cudaCheck(freeAsync(mPtr, mStream));
}
unique_ptr& operator=(const unique_ptr&) = delete;
unique_ptr& operator=(unique_ptr&& rhs) noexcept
{
if (mPtr) cudaCheck(freeAsync(mPtr, mStream));// release any allocation currently owned by this instance
mPtr = rhs.mPtr;
mStream = rhs.mStream;
rhs.mPtr = nullptr;
return *this;
}
void reset() {
if (mPtr) {
cudaCheck(freeAsync(mPtr, mStream));
mPtr = nullptr;
}
}
T* get() const {return mPtr;}
explicit operator bool() const {return mPtr != nullptr;}
};// util::cuda::unique_ptr
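// Example (illustrative only): typical use of unique_ptr as a scoped, stream-ordered device
// allocation. The element count and stream below are arbitrary.
//
// {
//     cudaStream_t stream = 0;// default stream
//     unique_ptr<float> scratch(1024, stream);// allocates 1024 floats on the device
//     if (scratch) {
//         // ... launch kernels on "stream" that read/write scratch.get() ...
//     }
// }// the allocation is freed asynchronously on "stream" when scratch goes out of scope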
/// @brief Computes the number of blocks per grid given the problem size and number of threads per block
/// @param numItems Problem size
/// @param threadsPerBlock Number of threads per block (second CUDA launch parameter)
/// @return number of blocks per grid (first CUDA launch parameter)
/// @note CUDA launch parameters: kernel<<< blocksPerGrid, threadsPerBlock, sharedMemSize, streamID>>>
inline size_t blocksPerGrid(size_t numItems, size_t threadsPerBlock)
{
NANOVDB_ASSERT(numItems > 0 && threadsPerBlock >= 32 && threadsPerBlock % 32 == 0);
return (numItems + threadsPerBlock - 1) / threadsPerBlock;
}
#if defined(__CUDACC__)// the following functions only run on the GPU!
/// @brief Cuda kernel that launches device lambda functions
/// @param numItems Problem size
template<typename Func, typename... Args>
__global__ void lambdaKernel(const size_t numItems, Func func, Args... args)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= numItems) return;
func(tid, args...);
}// util::cuda::lambdaKernel
#endif// __CUDACC__
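// Example (illustrative sketch): combining blocksPerGrid and lambdaKernel to apply a device lambda
// to every element of a device array. Requires nvcc with extended lambda support (e.g. --extended-lambda);
// the array pointer, element count and thread count below are placeholders.
//
// #if defined(__CUDACC__)
// inline void scaleArray(float* d_data, size_t numItems, cudaStream_t stream)
// {
//     const size_t threads = 128;// must be a positive multiple of 32 (see blocksPerGrid)
//     lambdaKernel<<<blocksPerGrid(numItems, threads), threads, 0, stream>>>(numItems,
//         [=] __device__ (int tid) { d_data[tid] *= 2.0f; });
//     cudaCheckError();// checks cudaGetLastError() after the launch
// }
// #endif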
}}// namespace util::cuda ============================================================
}// namespace nanovdb ===============================================================
#if defined(__CUDACC__)// the following functions only run on the GPU!
template<typename Func, typename... Args>
[[deprecated("Use nanovdb::util::cuda::lambdaKernel instead")]]
__global__ void cudaLambdaKernel(const size_t numItems, Func func, Args... args)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= numItems) return;
func(tid, args...);
}
#endif// __CUDACC__
#endif// NANOVDB_UTIL_CUDA_UTIL_H_HAS_BEEN_INCLUDED | 7,343 | C | 37.051813 | 124 | 0.657769 |
NVIDIA/warp/warp/native/nanovdb/cuda/DeviceBuffer.h | // Copyright Contributors to the OpenVDB Project
// SPDX-License-Identifier: MPL-2.0
/*!
\file DeviceBuffer.h
\author Ken Museth
\date January 8, 2020
\brief Implements a simple dual (host/device) CUDA buffer.
\note This file has no device-only kernel functions,
which explains why it's a .h and not .cuh file.
*/
#ifndef NANOVDB_CUDA_DEVICEBUFFER_H_HAS_BEEN_INCLUDED
#define NANOVDB_CUDA_DEVICEBUFFER_H_HAS_BEEN_INCLUDED
#include <nanovdb/HostBuffer.h>// for BufferTraits
#include <nanovdb/util/cuda/Util.h>// for cudaMalloc/cudaMallocManaged/cudaFree
namespace nanovdb {// ================================================================
namespace cuda {// ===================================================================
// ----------------------------> DeviceBuffer <--------------------------------------
/// @brief Simple memory buffer using un-managed pinned host memory when compiled with NVCC.
/// Obviously this class makes explicit use of CUDA, so replace it with your own memory
/// allocator if you are not using CUDA.
/// @note While CUDA's pinned host memory allows for asynchronous memory copy between host and device,
/// it is significantly slower than cached (un-pinned) memory on the host.
class DeviceBuffer
{
uint64_t mSize; // total number of bytes managed by this buffer (assumed to be identical for host and device)
void *mCpuData, *mGpuData; // raw pointers to the host and device buffers
bool mManaged;
public:
/// @brief Static factory method that returns an instance of this buffer
/// @param size byte size of buffer to be initialized
/// @param dummy this argument is currently ignored but required to match the API of the HostBuffer
/// @param host If true, the buffer is initialized only on the host/CPU, else on the device/GPU
/// @param stream optional stream argument (defaults to stream NULL)
/// @return An instance of this class using move semantics
static DeviceBuffer create(uint64_t size, const DeviceBuffer* dummy = nullptr, bool host = true, void* stream = nullptr);
/// @brief Static factory method that returns an instance of this buffer that wraps externally managed memory
/// @param size byte size of buffer specified by external memory
/// @param cpuData pointer to externally managed host memory
/// @param gpuData pointer to externally managed device memory
/// @return An instance of this class using move semantics
static DeviceBuffer create(uint64_t size, void* cpuData, void* gpuData);
/// @brief Constructor
/// @param size byte size of buffer to be initialized
/// @param host If true, the buffer is initialized only on the host/CPU, else on the device/GPU
/// @param stream optional stream argument (defaults to stream NULL)
DeviceBuffer(uint64_t size = 0, bool host = true, void* stream = nullptr)
: mSize(0)
, mCpuData(nullptr)
, mGpuData(nullptr)
, mManaged(false)
{
if (size > 0) this->init(size, host, stream);
}
DeviceBuffer(uint64_t size, void* cpuData, void* gpuData)
: mSize(size)
, mCpuData(cpuData)
, mGpuData(gpuData)
, mManaged(false)
{
}
/// @brief Disallow copy-construction
DeviceBuffer(const DeviceBuffer&) = delete;
/// @brief Move constructor
DeviceBuffer(DeviceBuffer&& other) noexcept
: mSize(other.mSize)
, mCpuData(other.mCpuData)
, mGpuData(other.mGpuData)
, mManaged(other.mManaged)
{
other.mSize = 0;
other.mCpuData = nullptr;
other.mGpuData = nullptr;
other.mManaged = false;
}
/// @brief Disallow copy assignment operation
DeviceBuffer& operator=(const DeviceBuffer&) = delete;
/// @brief Move assignment operator
DeviceBuffer& operator=(DeviceBuffer&& other) noexcept
{
this->clear();
mSize = other.mSize;
mCpuData = other.mCpuData;
mGpuData = other.mGpuData;
mManaged = other.mManaged;
other.mSize = 0;
other.mCpuData = nullptr;
other.mGpuData = nullptr;
other.mManaged = false;
return *this;
}
/// @brief Destructor frees memory on both the host and device
~DeviceBuffer() { this->clear(); };
/// @brief Initialize buffer
/// @param size byte size of buffer to be initialized
/// @param host If true buffer is initialized only on the host/CPU, else on the device/GPU
/// @note All existing buffers are first cleared
/// @warning size is expected to be non-zero. Use clear() to clear the buffer!
void init(uint64_t size, bool host = true, void* stream = nullptr);
/// @brief Returns a raw pointer to the host/CPU buffer managed by this allocator.
/// @warning Note that the pointer can be NULL!
void* data() const { return mCpuData; }
/// @brief Returns a raw pointer to the device/GPU buffer managed by this allocator.
/// @warning Note that the pointer can be NULL!
void* deviceData() const { return mGpuData; }
/// @brief Upload this buffer from the host to the device, i.e. CPU -> GPU.
/// @param stream optional CUDA stream (defaults to CUDA stream 0)
/// @param sync if false the memory copy is asynchronous
/// @note If the device/GPU buffer does not exist it is first allocated
/// @warning Assumes that the host/CPU buffer already exists
void deviceUpload(void* stream = nullptr, bool sync = true) const;
/// @brief Download this buffer from the device to the host, i.e. GPU -> CPU.
/// @param stream optional CUDA stream (defaults to CUDA stream 0)
/// @param sync if false the memory copy is asynchronous
/// @note If the host/CPU buffer does not exist it is first allocated
/// @warning Assumes that the device/GPU buffer already exists
void deviceDownload(void* stream = nullptr, bool sync = true) const;
/// @brief Returns the size in bytes of the raw memory buffer managed by this allocator.
uint64_t size() const { return mSize; }
//@{
/// @brief Returns true if this allocator is empty, i.e. has no allocated memory
bool empty() const { return mSize == 0; }
bool isEmpty() const { return mSize == 0; }
//@}
/// @brief De-allocate all memory managed by this allocator and set all pointers to NULL
void clear(void* stream = nullptr);
}; // DeviceBuffer class
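// Example (illustrative only, not part of the API): creating a DeviceBuffer on the host, filling it,
// and uploading it to the GPU. The byte size is arbitrary and std::memset assumes <cstring> is available.
//
// {
//     auto buffer = nanovdb::cuda::DeviceBuffer::create(1 << 20);// 1 MiB pinned host-only allocation
//     std::memset(buffer.data(), 0, buffer.size());// initialize the host copy
//     buffer.deviceUpload();// allocates the device copy on first use and copies CPU -> GPU (synchronous by default)
//     void* d_ptr = buffer.deviceData();// raw device pointer, e.g. to pass to a kernel
//     // ... use d_ptr ...
// }// both the host and device allocations are released by the destructor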
// --------------------------> Implementations below <------------------------------------
inline DeviceBuffer DeviceBuffer::create(uint64_t size, const DeviceBuffer*, bool host, void* stream)
{
return DeviceBuffer(size, host, stream);
}
inline DeviceBuffer DeviceBuffer::create(uint64_t size, void* cpuData, void* gpuData)
{
return DeviceBuffer(size, cpuData, gpuData);
}
inline void DeviceBuffer::init(uint64_t size, bool host, void* stream)
{
if (mSize>0) this->clear(stream);
NANOVDB_ASSERT(size > 0);
if (host) {
cudaCheck(cudaMallocHost((void**)&mCpuData, size)); // un-managed pinned memory on the host (can be slow to access!). Always 32B aligned
checkPtr(mCpuData, "cuda::DeviceBuffer::init: failed to allocate host buffer");
} else {
cudaCheck(util::cuda::mallocAsync((void**)&mGpuData, size, reinterpret_cast<cudaStream_t>(stream))); // un-managed memory on the device, always 32B aligned!
checkPtr(mGpuData, "cuda::DeviceBuffer::init: failed to allocate device buffer");
}
mSize = size;
mManaged = true;
} // DeviceBuffer::init
inline void DeviceBuffer::deviceUpload(void* stream, bool sync) const
{
if (!mManaged) throw std::runtime_error("DeviceBuffer::deviceUpload called on externally managed memory. Replace deviceUpload call with the appropriate external copy operation.");
checkPtr(mCpuData, "uninitialized cpu data");
if (mGpuData == nullptr) {
cudaCheck(util::cuda::mallocAsync((void**)&mGpuData, mSize, reinterpret_cast<cudaStream_t>(stream))); // un-managed memory on the device, always 32B aligned!
}
checkPtr(mGpuData, "uninitialized gpu data");
cudaCheck(cudaMemcpyAsync(mGpuData, mCpuData, mSize, cudaMemcpyHostToDevice, reinterpret_cast<cudaStream_t>(stream)));
if (sync) cudaCheck(cudaStreamSynchronize(reinterpret_cast<cudaStream_t>(stream)));
} // DeviceBuffer::deviceUpload
inline void DeviceBuffer::deviceDownload(void* stream, bool sync) const
{
if (!mManaged) throw std::runtime_error("DeviceBuffer::deviceDownload called on externally managed memory. Replace deviceDownload call with the appropriate external copy operation.");
checkPtr(mGpuData, "uninitialized gpu data");
if (mCpuData == nullptr) {
cudaCheck(cudaMallocHost((void**)&mCpuData, mSize)); // un-managed pinned memory on the host (can be slow to access!). Always 32B aligned
}
checkPtr(mCpuData, "uninitialized cpu data");
cudaCheck(cudaMemcpyAsync(mCpuData, mGpuData, mSize, cudaMemcpyDeviceToHost, reinterpret_cast<cudaStream_t>(stream)));
if (sync) cudaCheck(cudaStreamSynchronize(reinterpret_cast<cudaStream_t>(stream)));
} // DeviceBuffer::deviceDownload
inline void DeviceBuffer::clear(void *stream)
{
if (mManaged && mGpuData) cudaCheck(util::cuda::freeAsync(mGpuData, reinterpret_cast<cudaStream_t>(stream)));
if (mManaged && mCpuData) cudaCheck(cudaFreeHost(mCpuData));
mCpuData = mGpuData = nullptr;
mSize = 0;
mManaged = false;
} // DeviceBuffer::clear
}// namespace cuda
using CudaDeviceBuffer [[deprecated("Use nanovdb::cuda::DeviceBuffer instead")]] = cuda::DeviceBuffer;
template<>
struct BufferTraits<cuda::DeviceBuffer>
{
static constexpr bool hasDeviceDual = true;
};
}// namespace nanovdb
#endif // end of NANOVDB_CUDA_DEVICEBUFFER_H_HAS_BEEN_INCLUDED
| 9,790 | C | 41.202586 | 187 | 0.675587 |
NVIDIA/warp/warp/native/clang/clang.cpp | /** Copyright (c) 2023 NVIDIA CORPORATION. All rights reserved.
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/
#include "../native/crt.h"
#include <clang/Frontend/CompilerInstance.h>
#include <clang/Basic/DiagnosticOptions.h>
#include <clang/Frontend/TextDiagnosticPrinter.h>
#if LLVM_VERSION_MAJOR >= 18
#include <llvm/Frontend/Debug/Options.h>
#else
#include <llvm/Support/CodeGen.h>
#endif
#include <clang/CodeGen/CodeGenAction.h>
#include <clang/Basic/TargetInfo.h>
#include <clang/Lex/PreprocessorOptions.h>
#include <llvm/Support/TargetSelect.h>
#include <llvm/IR/Module.h>
#include <llvm/IR/LLVMContext.h>
#include <llvm/ExecutionEngine/GenericValue.h>
#include <llvm/Target/TargetMachine.h>
#include <llvm/MC/TargetRegistry.h>
#include <llvm/PassRegistry.h>
#include <llvm/InitializePasses.h>
#include <llvm/IR/LegacyPassManager.h>
#include <llvm/IRReader/IRReader.h>
#include <llvm/Linker/Linker.h>
#include <llvm/ExecutionEngine/Orc/LLJIT.h>
#include <llvm/ExecutionEngine/JITEventListener.h>
#include <llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h>
#include <llvm/ExecutionEngine/Orc/ExecutionUtils.h>
#include <llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h>
#include <llvm/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.h>
#include <llvm/ExecutionEngine/SectionMemoryManager.h>
#include <cmath>
#include <vector>
#include <iostream>
#include <string>
#include <cstring>
#if defined(_WIN64)
extern "C" void __chkstk();
#elif defined(__APPLE__)
extern "C" void __bzero(void*, size_t);
extern "C" __double2 __sincos_stret(double);
extern "C" __float2 __sincosf_stret(float);
#endif
extern "C" {
// GDB and LLDB support debugging of JIT-compiled code by observing calls to __jit_debug_register_code()
// by putting a breakpoint on it, and retrieving the debug info through __jit_debug_descriptor.
// On Linux it suffices for these symbols not to be stripped out, while for Windows a .pdb has to contain
// their information. LLVM defines them, but we don't want a huge .pdb with all LLVM source code's debug
// info. By forward-declaring them here it suffices to compile this file with /Zi.
extern struct jit_descriptor __jit_debug_descriptor;
extern void __jit_debug_register_code();
}
namespace wp {
#if defined (_WIN32)
// Windows defaults to using the COFF binary format (aka. "msvc" in the target triple).
// Override it to use the ELF format to support DWARF debug info, but keep using the
// Microsoft calling convention (see also https://llvm.org/docs/DebuggingJITedCode.html).
static const char* target_triple = "x86_64-pc-windows-elf";
#else
static const char* target_triple = LLVM_DEFAULT_TARGET_TRIPLE;
#endif
static void initialize_llvm()
{
llvm::InitializeAllTargetInfos();
llvm::InitializeAllTargets();
llvm::InitializeAllTargetMCs();
llvm::InitializeAllAsmPrinters();
}
static std::unique_ptr<llvm::Module> cpp_to_llvm(const std::string& input_file, const char* cpp_src, const char* include_dir, bool debug, bool verify_fp, llvm::LLVMContext& context)
{
// Compilation arguments
std::vector<const char*> args;
args.push_back(input_file.c_str());
args.push_back("-I");
args.push_back(include_dir);
args.push_back(debug ? "-O0" : "-O2");
args.push_back("-triple");
args.push_back(target_triple);
#if defined(__x86_64__) || defined(_M_X64)
args.push_back("-target-feature");
args.push_back("+f16c"); // Enables support for _Float16
#endif
clang::IntrusiveRefCntPtr<clang::DiagnosticOptions> diagnostic_options = new clang::DiagnosticOptions();
std::unique_ptr<clang::TextDiagnosticPrinter> text_diagnostic_printer =
std::make_unique<clang::TextDiagnosticPrinter>(llvm::errs(), &*diagnostic_options);
clang::IntrusiveRefCntPtr<clang::DiagnosticIDs> diagnostic_ids;
std::unique_ptr<clang::DiagnosticsEngine> diagnostic_engine =
std::make_unique<clang::DiagnosticsEngine>(diagnostic_ids, &*diagnostic_options, text_diagnostic_printer.release());
clang::CompilerInstance compiler_instance;
auto& compiler_invocation = compiler_instance.getInvocation();
clang::CompilerInvocation::CreateFromArgs(compiler_invocation, args, *diagnostic_engine.release());
if(debug)
{
#if LLVM_VERSION_MAJOR >= 18
compiler_invocation.getCodeGenOpts().setDebugInfo(llvm::codegenoptions::FullDebugInfo);
#else
compiler_invocation.getCodeGenOpts().setDebugInfo(clang::codegenoptions::FullDebugInfo);
#endif
}
// Map code to a MemoryBuffer
std::unique_ptr<llvm::MemoryBuffer> buffer = llvm::MemoryBuffer::getMemBufferCopy(cpp_src);
compiler_invocation.getPreprocessorOpts().addRemappedFile(input_file.c_str(), buffer.get());
if(!debug)
{
compiler_instance.getPreprocessorOpts().addMacroDef("NDEBUG");
}
if(verify_fp)
{
compiler_instance.getPreprocessorOpts().addMacroDef("WP_VERIFY_FP");
}
compiler_instance.getLangOpts().MicrosoftExt = 1; // __forceinline / __int64
compiler_instance.getLangOpts().DeclSpecKeyword = 1; // __declspec
compiler_instance.createDiagnostics(text_diagnostic_printer.get(), false);
clang::EmitLLVMOnlyAction emit_llvm_only_action(&context);
bool success = compiler_instance.ExecuteAction(emit_llvm_only_action);
buffer.release();
return success ? std::move(emit_llvm_only_action.takeModule()) : nullptr;
}
static std::unique_ptr<llvm::Module> cuda_to_llvm(const std::string& input_file, const char* cpp_src, const char* include_dir, bool debug, llvm::LLVMContext& context)
{
// Compilation arguments
std::vector<const char*> args;
args.push_back(input_file.c_str());
args.push_back("-I");
args.push_back(include_dir);
args.push_back(debug ? "-O0" : "-O2");
args.push_back("-triple");
args.push_back("nvptx64-nvidia-cuda");
args.push_back("-target-cpu");
args.push_back("sm_70");
clang::IntrusiveRefCntPtr<clang::DiagnosticOptions> diagnostic_options = new clang::DiagnosticOptions();
std::unique_ptr<clang::TextDiagnosticPrinter> text_diagnostic_printer =
std::make_unique<clang::TextDiagnosticPrinter>(llvm::errs(), &*diagnostic_options);
clang::IntrusiveRefCntPtr<clang::DiagnosticIDs> diagnostic_ids;
std::unique_ptr<clang::DiagnosticsEngine> diagnostic_engine =
std::make_unique<clang::DiagnosticsEngine>(diagnostic_ids, &*diagnostic_options, text_diagnostic_printer.release());
clang::CompilerInstance compiler_instance;
auto& compiler_invocation = compiler_instance.getInvocation();
clang::CompilerInvocation::CreateFromArgs(compiler_invocation, args, *diagnostic_engine.release());
if(debug)
{
#if LLVM_VERSION_MAJOR >= 18
compiler_invocation.getCodeGenOpts().setDebugInfo(llvm::codegenoptions::FullDebugInfo);
#else
compiler_invocation.getCodeGenOpts().setDebugInfo(clang::codegenoptions::FullDebugInfo);
#endif
}
// Map code to a MemoryBuffer
std::unique_ptr<llvm::MemoryBuffer> buffer = llvm::MemoryBuffer::getMemBufferCopy(cpp_src);
compiler_invocation.getPreprocessorOpts().addRemappedFile(input_file.c_str(), buffer.get());
// According to https://llvm.org/docs/CompileCudaWithLLVM.html, "Both clang and nvcc define `__CUDACC__` during CUDA compilation."
// But this normally happens in the __clang_cuda_runtime_wrapper.h header, which we don't include.
// The __CUDA__ and __CUDA_ARCH__ macros are internally defined by llvm-project/clang/lib/Frontend/InitPreprocessor.cpp
compiler_instance.getPreprocessorOpts().addMacroDef("__CUDACC__");
if(!debug)
{
compiler_instance.getPreprocessorOpts().addMacroDef("NDEBUG");
}
compiler_instance.getLangOpts().CUDA = 1;
compiler_instance.getLangOpts().CUDAIsDevice = 1;
compiler_instance.getLangOpts().CUDAAllowVariadicFunctions = 1;
compiler_instance.createDiagnostics(text_diagnostic_printer.get(), false);
clang::EmitLLVMOnlyAction emit_llvm_only_action(&context);
bool success = compiler_instance.ExecuteAction(emit_llvm_only_action);
buffer.release();
return success ? std::move(emit_llvm_only_action.takeModule()) : nullptr;
}
extern "C" {
WP_API int compile_cpp(const char* cpp_src, const char *input_file, const char* include_dir, const char* output_file, bool debug, bool verify_fp)
{
initialize_llvm();
llvm::LLVMContext context;
std::unique_ptr<llvm::Module> module = cpp_to_llvm(input_file, cpp_src, include_dir, debug, verify_fp, context);
if(!module)
{
return -1;
}
std::string error;
const llvm::Target* target = llvm::TargetRegistry::lookupTarget(target_triple, error);
const char* CPU = "generic";
const char* features = "";
llvm::TargetOptions target_options;
llvm::Reloc::Model relocation_model = llvm::Reloc::PIC_; // Position Independent Code
llvm::CodeModel::Model code_model = llvm::CodeModel::Large; // Don't make assumptions about displacement sizes
llvm::TargetMachine* target_machine = target->createTargetMachine(target_triple, CPU, features, target_options, relocation_model, code_model);
module->setDataLayout(target_machine->createDataLayout());
std::error_code error_code;
llvm::raw_fd_ostream output(output_file, error_code, llvm::sys::fs::OF_None);
llvm::legacy::PassManager pass_manager;
#if LLVM_VERSION_MAJOR >= 18
llvm::CodeGenFileType file_type = llvm::CodeGenFileType::ObjectFile;
#else
llvm::CodeGenFileType file_type = llvm::CGFT_ObjectFile;
#endif
target_machine->addPassesToEmitFile(pass_manager, output, nullptr, file_type);
pass_manager.run(*module);
output.flush();
delete target_machine;
return 0;
}
WP_API int compile_cuda(const char* cpp_src, const char *input_file, const char* include_dir, const char* output_file, bool debug)
{
initialize_llvm();
llvm::LLVMContext context;
std::unique_ptr<llvm::Module> module = cuda_to_llvm(input_file, cpp_src, include_dir, debug, context);
if(!module)
{
return -1;
}
std::string error;
const llvm::Target* target = llvm::TargetRegistry::lookupTarget("nvptx64-nvidia-cuda", error);
const char* CPU = "sm_70";
const char* features = "+ptx75"; // Warp requires CUDA 11.5, which supports PTX ISA 7.5
llvm::TargetOptions target_options;
llvm::Reloc::Model relocation_model = llvm::Reloc::PIC_;
llvm::TargetMachine* target_machine = target->createTargetMachine("nvptx64-nvidia-cuda", CPU, features, target_options, relocation_model);
module->setDataLayout(target_machine->createDataLayout());
// Link libdevice
llvm::SMDiagnostic diagnostic;
std::string libdevice_path = std::string(include_dir) + "/libdevice/libdevice.10.bc";
std::unique_ptr<llvm::Module> libdevice(llvm::parseIRFile(libdevice_path, diagnostic, context));
if(!libdevice)
{
return -1;
}
llvm::Linker linker(*module.get());
if(linker.linkInModule(std::move(libdevice), llvm::Linker::Flags::LinkOnlyNeeded) == true)
{
return -1;
}
std::error_code error_code;
llvm::raw_fd_ostream output(output_file, error_code, llvm::sys::fs::OF_None);
llvm::legacy::PassManager pass_manager;
#if LLVM_VERSION_MAJOR >= 18
llvm::CodeGenFileType file_type = llvm::CodeGenFileType::AssemblyFile;
#else
llvm::CodeGenFileType file_type = llvm::CGFT_AssemblyFile;
#endif
target_machine->addPassesToEmitFile(pass_manager, output, nullptr, file_type);
pass_manager.run(*module);
output.flush();
delete target_machine;
return 0;
}
// Global JIT instance
static llvm::orc::LLJIT* jit = nullptr;
// Load an object file into an in-memory DLL named `module_name`
WP_API int load_obj(const char* object_file, const char* module_name)
{
if(!jit)
{
initialize_llvm();
auto jit_expected = llvm::orc::LLJITBuilder()
.setObjectLinkingLayerCreator(
[&](llvm::orc::ExecutionSession &session, const llvm::Triple &triple) {
auto get_memory_manager = []() {
return std::make_unique<llvm::SectionMemoryManager>();
};
auto obj_linking_layer = std::make_unique<llvm::orc::RTDyldObjectLinkingLayer>(session, std::move(get_memory_manager));
// Register the event listener.
obj_linking_layer->registerJITEventListener(*llvm::JITEventListener::createGDBRegistrationListener());
// Make sure the debug info sections aren't stripped.
obj_linking_layer->setProcessAllSections(true);
return obj_linking_layer;
})
.create();
if(!jit_expected)
{
std::cerr << "Failed to create JIT instance: " << toString(jit_expected.takeError()) << std::endl;
return -1;
}
jit = (*jit_expected).release();
}
auto dll = jit->createJITDylib(module_name);
if(!dll)
{
std::cerr << "Failed to create JITDylib: " << toString(dll.takeError()) << std::endl;
return -1;
}
// Define symbols for Warp's CRT functions subset
{
#if defined(__APPLE__)
#define MANGLING_PREFIX "_"
#else
#define MANGLING_PREFIX ""
#endif
const auto flags = llvm::JITSymbolFlags::Exported | llvm::JITSymbolFlags::Absolute;
#if LLVM_VERSION_MAJOR >= 18
#define SYMBOL(sym) { jit->getExecutionSession().intern(MANGLING_PREFIX #sym), { llvm::orc::ExecutorAddr::fromPtr(&::sym), flags} }
#define SYMBOL_T(sym, T) { jit->getExecutionSession().intern(MANGLING_PREFIX #sym), { llvm::orc::ExecutorAddr::fromPtr(static_cast<T>(&::sym)), flags} }
auto error = dll->define(llvm::orc::absoluteSymbols(llvm::orc::SymbolMap({
#else
#define SYMBOL(sym) { jit->getExecutionSession().intern(MANGLING_PREFIX #sym), { llvm::pointerToJITTargetAddress(&::sym), flags} }
#define SYMBOL_T(sym, T) { jit->getExecutionSession().intern(MANGLING_PREFIX #sym), { llvm::pointerToJITTargetAddress(static_cast<T>(&::sym)), flags} }
auto error = dll->define(llvm::orc::absoluteSymbols({
#endif
SYMBOL(printf), SYMBOL(puts), SYMBOL(putchar),
SYMBOL_T(abs, int(*)(int)), SYMBOL(llabs),
SYMBOL(fmodf), SYMBOL_T(fmod, double(*)(double, double)),
SYMBOL(logf), SYMBOL_T(log, double(*)(double)),
SYMBOL(log2f), SYMBOL_T(log2, double(*)(double)),
SYMBOL(log10f), SYMBOL_T(log10, double(*)(double)),
SYMBOL(expf), SYMBOL_T(exp, double(*)(double)),
SYMBOL(sqrtf), SYMBOL_T(sqrt, double(*)(double)),
SYMBOL(cbrtf), SYMBOL_T(cbrt, double(*)(double)),
SYMBOL(powf), SYMBOL_T(pow, double(*)(double, double)),
SYMBOL(floorf), SYMBOL_T(floor, double(*)(double)),
SYMBOL(ceilf), SYMBOL_T(ceil, double(*)(double)),
SYMBOL(fabsf), SYMBOL_T(fabs, double(*)(double)),
SYMBOL(roundf), SYMBOL_T(round, double(*)(double)),
SYMBOL(truncf), SYMBOL_T(trunc, double(*)(double)),
SYMBOL(rintf), SYMBOL_T(rint, double(*)(double)),
SYMBOL(acosf), SYMBOL_T(acos, double(*)(double)),
SYMBOL(asinf), SYMBOL_T(asin, double(*)(double)),
SYMBOL(atanf), SYMBOL_T(atan, double(*)(double)),
SYMBOL(atan2f), SYMBOL_T(atan2, double(*)(double, double)),
SYMBOL(cosf), SYMBOL_T(cos, double(*)(double)),
SYMBOL(sinf), SYMBOL_T(sin, double(*)(double)),
SYMBOL(tanf), SYMBOL_T(tan, double(*)(double)),
SYMBOL(sinhf), SYMBOL_T(sinh, double(*)(double)),
SYMBOL(coshf), SYMBOL_T(cosh, double(*)(double)),
SYMBOL(tanhf), SYMBOL_T(tanh, double(*)(double)),
SYMBOL(fmaf),
SYMBOL(memcpy), SYMBOL(memset), SYMBOL(memmove),
SYMBOL(_wp_assert),
SYMBOL(_wp_isfinite),
SYMBOL(_wp_isnan),
SYMBOL(_wp_isinf),
#if defined(_WIN64)
// For functions with large stack frames the compiler will emit a call to
// __chkstk() to linearly touch each memory page. This grows the stack without
// triggering the stack overflow guards.
SYMBOL(__chkstk),
#elif defined(__APPLE__)
SYMBOL(__bzero),
SYMBOL(__sincos_stret), SYMBOL(__sincosf_stret),
#else
SYMBOL(sincosf), SYMBOL_T(sincos, void(*)(double,double*,double*)),
#endif
#if LLVM_VERSION_MAJOR >= 18
})));
#else
}));
#endif
if(error)
{
std::cerr << "Failed to define symbols: " << llvm::toString(std::move(error)) << std::endl;
return -1;
}
}
// Load the object file into a memory buffer
auto buffer = llvm::MemoryBuffer::getFile(object_file);
if(!buffer)
{
std::cerr << "Failed to load object file: " << buffer.getError().message() << std::endl;
return -1;
}
auto err = jit->addObjectFile(*dll, std::move(*buffer));
if(err)
{
std::cerr << "Failed to add object file: " << llvm::toString(std::move(err)) << std::endl;
return -1;
}
return 0;
}
WP_API int unload_obj(const char* module_name)
{
if(!jit) // If there's no JIT instance there are no object files loaded
{
return 0;
}
auto* dll = jit->getJITDylibByName(module_name);
llvm::Error error = jit->getExecutionSession().removeJITDylib(*dll);
if(error)
{
std::cerr << "Failed to unload: " << llvm::toString(std::move(error)) << std::endl;
return -1;
}
return 0;
}
WP_API uint64_t lookup(const char* dll_name, const char* function_name)
{
if(!jit) // no JIT instance means no object files have been loaded, so there is nothing to look up
{
return 0;
}
auto* dll = jit->getJITDylibByName(dll_name);
auto func = jit->lookup(*dll, function_name);
if(!func)
{
std::cerr << "Failed to lookup symbol: " << llvm::toString(func.takeError()) << std::endl;
return 0;
}
return func->getValue();
}
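// Example (illustrative sketch only): the typical life cycle of the entry points above is
// compile -> load -> lookup -> unload. The source string, file names, include directory and
// symbol name below are placeholders.
//
// void jit_example()
// {
//     const char* src = "extern \"C\" int answer() { return 42; }";// extern "C" avoids C++ name mangling
//     compile_cpp(src, "example.cpp", "/path/to/warp/native", "example.o", /*debug*/ false, /*verify_fp*/ false);
//     load_obj("example.o", "example_module");
//     auto fn = reinterpret_cast<int (*)()>(lookup("example_module", "answer"));
//     int result = fn ? fn() : -1;// 42 on success
//     unload_obj("example_module");
// }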
} // extern "C"
} // namespace wp
| 18,649 | C++ | 36.525151 | 181 | 0.659231 |
NVIDIA/warp/warp/native/cutlass/PUBLICATIONS.md | # Publications Using Cutlass
## 2022
- ["Bolt: Bridging the Gap between Auto-tuners and Hardware-native Performance"](https://arxiv.org/abs/2110.15238). Jiarong Xing, Leyuan Wang, Shang Zhang, Jack Chen, Ang Chen, Yibo Zhu. _Proceedings of the 5th MLSys Conference_, August 2022.
- ["Recovering single precision accuracy from Tensor Cores while surpassing the FP32 theoretical peak performance"](https://arxiv.org/abs/2203.03341). Hiroyuki Ootomo, Rio Yokota. _International Journal of High Performance Computing_, March 2022.
## 2021
- ["Arithmetic-intensity-guided fault tolerance for neural network inference on GPUs"](https://dl.acm.org/doi/abs/10.1145/3458817.3476184). Jack Kosaian, K. V. Rashmi. _Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis_, November 2021.
- ["Real-time Neural Radiance Caching for Path Tracing"](https://d1qx31qr3h6wln.cloudfront.net/publications/paper_4.pdf). Thomas Muller, Fabrice Rousselle, Jan Novak, Alex Keller. _ACM Trans. Graph._, August 2021.
## 2020
- ["Scalable Knowledge Graph Analytics at 136 Petaflop/s"](https://www.computer.org/csdl/proceedings-article/sc/2020/999800a061/1oeORDgCM0g). Ramakrishnan Kannan, Piyush Sao, Hao Lu, Drahomira Herrmannova, Vijay Thakkar, Robert Patton, Richard Vuduc, Thomas Potok. _Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis_, November 2020.
- ["Accelerating Sparse DNN Models without Hardware-Support via Tile-Wise Sparsity
"](https://arxiv.org/abs/2008.13006). Cong Guo, Bo Yang Hsueh, Jingwen Leng, Yuxian Qiu, Yue Guan, Zehuan Wang, Xiaoying Jia, Xipeng Li, Minyi Guo, Yuhao Zhu. _Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis_, November 2020.
- ["Strassen's Algorithm Reloaded on GPUs"](https://dl.acm.org/doi/10.1145/3372419). Jianyu Huang, Chenhan D. Yu, Robert A. van de Geijn. _ACM Transactions on Mathematical Software_, March 2020.
| 2,019 | Markdown | 86.826083 | 392 | 0.778108 |
NVIDIA/warp/warp/native/cutlass/CONTRIBUTORS.md | 
[README](/README.md#documentation) > **Contributors**
# CUTLASS Developers and Contributors
This is the official list of CUTLASS developers and contributors.
## DEVELOPERS
Andrew Kerr
Haicheng Wu
Manish Gupta
Dustyn Blasig
Pradeep Ramani
Cris Cecka
Vijay Thakkar
Aniket Shivam
Honghao Lu
Ethan Yan
Zhaodong Chen
Jack Kosaian
Yujia Zhai
Naila Farooqui
Piotr Majcher
Paul Springer
Jin Wang
Chinmay Talegaonkar
Shang Zhang
Scott Yokim
Markus Hohnerbach
Aditya Atluri
David Tanner
Manikandan Ananth
## CUTLASS Product Manager
Matthew Nicely
## CONTRIBUTORS
Timothy Costa
Julien Demouth
Brian Fahs
Michael Goldfarb
Mostafa Hagog
Fei Hu
Alan Kaatz
Tina Li
Timmy Liu
Duane Merrill
Kevin Siu
Markus Tavenrath
John Tran
Vicki Wang
Junkai Wu
Fung Xie
Albert Xu
Jack Yang
Xiuxia Zhang
Nick Zhao
## ACKNOWLEDGEMENTS
Girish Bharambe
Luke Durant
Olivier Giroux
Stephen Jones
Rishkul Kulkarni
Bryce Lelbach
Joel McCormack
Kyrylo Perelygin
| 1,011 | Markdown | 13.457143 | 74 | 0.816024 |
NVIDIA/warp/warp/native/cutlass/CHANGELOG.md | # NVIDIA CUTLASS Changelog
## [2.11.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.11.0) (2022-11-19)
* Stream-K, which is a new general way to do split-K. It can not only improve performance, but can also significantly reduce the number of tile sizes that need to be profiled to find the best one.
* [Fused multi-head attention Kernel](/examples/41_fused_multi_head_attention). It has two variants: one uses batched GEMM for the fixed sequence length, and the other one uses group GEMM for the variable sequence length. Both versions just need one kernel.
* [Dual GEMM](/examples/45_dual_gemm), which can fuse A x B and A x C into one kernel. The two GEMMs have no producer-consumer dependency.
* Hopper improves [double precision matrix multiplication](/test/unit/gemm/device/gemm_f64n_f64t_f64t_tensor_op_f64_sm90.cu) by 2x compared to Ampere at iso-clocks. It is supported since CUDA 11.8.
* [BLAS3](/test/unit/gemm/device/hemm_cf64_cf64_cf64_tensor_op_f64_sm90.cu) functions with Hopper's new double precision matrix multiplication instructions.
* [ELL Block Sparse GEMM](/examples/43_ell_block_sparse_gemm), which uses an [ELL matrix](https://developer.nvidia.com/blog/accelerating-matrix-multiplication-with-block-sparse-format-and-nvidia-tensor-cores/) to describe the sparsity of the A matrix. The B and output matrices are still dense. The block size can be arbitrary.
* Optimized [Group Conv](/examples/42_ampere_tensorop_group_conv) for SingleGroup mode, which requires that the output channel per group is a multiple of Threadblock tile N.
* [Optimized DepthWise Conv](/examples/46_depthwise_simt_conv2dfprop/depthwise_simt_conv2dfprop.cu). Two new modes are added:
* [kOptimized](/test/unit/conv/device/depthwise_conv2d_fprop_direct_conv_f16nhwc_f16nhwc_f16nhwc_simt_f16_sm60.cu) - uses direct convolution to compute instead of implicit GEMM.
* The restrictions are: 1) input, output channel and group number should be a multiple of (128 / sizeof(input element)). 2) The input filter size should be the same as the template parameter configuration.
* [kFixedStrideDilation](/test/unit/conv/device/depthwise_conv2d_fprop_direct_conv_fixed_stride_dilation_f16nhwc_f16nhwc_f16nhwc_simt_f16_sm60.cu) - puts stride and dilation into templates to further improve performance. In this mode, the kernel keeps some inputs persistent in registers to squeeze out more performance, so large filter/stride/dilation values are not recommended.
* The restrictions are: 1) input, output channel and group number should be a multiple of (128 / sizeof(input element)). 2) Input filter size, stride and dilation should be the same as the template parameter configuration.
* [Scripts](/examples/44_multi_gemm_ir_and_codegen) to fuse multiple back-to-back GEMM. Its implementation was discussed in a GTC'22 Spring [talk](https://www.nvidia.com/en-us/on-demand/session/gtcspring22-s41606/).
* [FP8 data type definition](/include/cutlass/float8.h) and [conversion routines](/include/cutlass/numeric_conversion.h#L1274-2115).
* Updates and bugfixes from the community (thanks!). Big shout out to Meta's [xFormers](https://github.com/facebookresearch/xformers).
* **Deprecation announcement:** CUTLASS plans to deprecate the following:
* Maxwell and Pascal GPU architectures
* Ubuntu 16.04
* CUDA 10.2
## [2.10.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.10.0) (2022-08-23)
* [CUTLASS Python](/examples/40_cutlass_py) now supports GEMM, CONV, Group GEMM for different data types as well as different epilogue flavours.
* Optimizations for CUTLASS's [Grouped GEMM](examples/24_gemm_grouped/gemm_grouped.cu) kernel. The threadblock scheduling has been improved, and some computation can be moved to the host side if applicable. [Grouped Syr2k](examples/38_syr2k_grouped/syr2k_grouped.cu) kernels have also been added.
* Optimizations for [GEMM+Softmax](examples/35_gemm_softmax). All the reduction computation is fused into the previous GEMM. More template arguments are provided to fine tune the performance.
* [Grouped GEMM for Multihead Attention](examples/41_multi_head_attention). This general group GEMM based MHA does not require the sequence length of all GEMMs to be the same, which makes it most useful for natural language processing.
* [GEMM + Layer norm fusion for Ampere](examples/37_gemm_layernorm_gemm_fusion/) splits the layernorm into two parts and both of them can be fused into the GEMMs before and after separately. In addition to using the square sum to compute the variance of the layernorm, [Shift-K](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data) is provided in case the square sum raises numerical issues.
* [GEMM Epilogue Permutation Fusion](examples/39_gemm_permute) can apply user provided permutation layout mapping in the GEMM epilogue.
* [Grouped convolution targeting implicit GEMM](test/unit/conv/device/group_conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.cu) introduces the first group convolution implementation to CUTLASS. It is an Analytical implementation, not an Optimized one. The restrictions are: 1) input and output channel number should be a multiple of the group number. 2) split-K is not supported. The implementation has 2 modes:
* kSingleGroup: output channel per group is multiple of Threadblock tile N.
* kMultipleGroup: Threadblock tile N is multiple of output channel per group.
* [Depthwise separable convolution](test/unit/conv/device/depthwise_fprop_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_simt_f16_sm60.cu) introduces the first depthwise convolution, which is also Analytical for now. The restrictions are: 1) SIMT only 2) No split-K 3) input channel equals output channel equals group number.
* Standalone [Layernorm](/tools/util/include/cutlass/util/device_layernorm.h) and [Pooling](/tools/util/include/cutlass/util/device_nhwc_pooling.h) kernels.
* [Back-to-back GEMM/CONV](examples/13_two_tensor_op_fusion) relaxes the requirement that the first GEMM K dimension needs to be the multiple of Threadblock Tile K dimension.
* Optimal performance using [**CUDA 11.6u2**](https://developer.nvidia.com/cuda-downloads)
* Updates and bugfixes from the community (thanks!)
## [2.9.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.9.0) (2022-04-21)
* [First layer Convolution kernels](/test/unit/conv/device/conv2d_fprop_fixed_channels_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.cu) specialized for small channel counts and reduced alignment
* [Few channels](/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_few_channels.h) specialization for reduced alignment capabilities
* [Fixed channels](/include/cutlass/conv/threadblock/conv2d_fprop_activation_tile_access_iterator_fixed_channels.h) further specialized when channel count perfectly matches the access vector size
* [Unit tests](/test/unit/conv/device/conv2d_fprop_few_channels_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.cu)
* [Python-based instance emitter](/tools/library/scripts/generator.py) in the CUTLASS Library and support in the Profiler
* [BLAS3](https://docs.nvidia.com/cuda/cublas/index.html#cublas-level-3-function-reference) operators accelerated by Tensor Cores
* Supported types: f32, cf32, f64, cf64, tf32x3, complex tf32x3
* [HERK](/test/unit/gemm/device/her2k_cf32h_cf32n_tensor_op_fast_f32_sm80.cu) with [emitter](/tools/library/scripts/rank_k_operation.py)
* [SYRK](/test/unit/gemm/device/syrk_f32n_f32t_tensor_op_fast_f32_sm80.cu) with [emitter](/tools/library/scripts/rank_k_operation.py)
* [SYMM](/test/unit/gemm/device/symm_f32n_f32n_tensor_op_fast_f32_ls_sm80.cu) with [emitter](/tools/library/scripts/symm_operation.py)
* [TRMM](/test/unit/gemm/device/trmm_f32n_f32t_f32t_tensor_op_fast_f32_ls_sm80.cu) with [emitter](/tools/library/scripts/trmm_operation.py)
* [Unit tests](/test/unit/gemm/device/testbed_rank_k_universal.h)
* [CUTLASS Python](/examples/40_cutlass_py) demonstrating JIT compilation of CUTLASS kernels and a Python-based runtime using [CUDA Python](https://developer.nvidia.com/cuda-python)
* [Python-based runtime](/tools/library/scripts/rt.py) interoperable with existing emitters
* [GEMM + Softmax example](/examples/35_gemm_softmax)
* [Gather and Scatter Fusion with GEMM](/examples/36_gather_scatter_fusion) can gather inputs and scatters outputs based on indices vectors in the same GEMM kernel.
* It can select random rows in a row major matrix.
* It can select random columns in a column major matrix.
* [Back-to-back GEMM/CONV](examples/13_two_tensor_op_fusion) fully supports buffering the first GEMM/CONV results in the shared memory for the latter one to use. It can eliminate register spill when the tile size is big. Additionally, bias vector add is supported in the first GEMM/CONV.
* Supported kernels: GEMM and CONV.
* Supported types: fp16 and int8.
* Supported architectures: Turing and Ampere.
* [Transposed Convolution](/examples/34_transposed_conv2d) (a.k.a Deconvolution) support which reuses Dgrad implementation.
* [Utility functions](/tools/util/include/cutlass/util) that can pad NHWC and convert between NCHW and NHWC.
* [Small alignment implicit gemm](https://github.com/NVIDIA/cutlass/issues/242) support for Fprop/Dgrad/Wgrad so that padding is no longer mandated to use tensor cores in these kernels.
* Epilogue enhancement:
* Eliminate bank conflicts in int8 tensor core kernels.
* Half2 usage if epilogue compute type is fp16.
* More activation functions: Silu, Hardswish, Leaky Relu.
* New elementwise fusion pattern for [residual block](/include/cutlass/epilogue/thread/linear_combination_residual_block.h).
* [Group GEMM](/examples/24_gemm_grouped) thread block number calculation fix which helps to launch the intended number of threadblocks to fully occupy the GPUs.
* [Parallel GEMM splitk](https://github.com/NVIDIA/cutlass/pull/277) support in the CUTLASS profiler.
* Optimal performance using [**CUDA 11.6u2**](https://developer.nvidia.com/cuda-downloads)
* Updates and bugfixes from the community (thanks!)
## [2.8.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.8.0) (2021-11-19)
* **TF32x3:** emulated single-precision using Tensor Cores
* 45+ TFLOPs on NVIDIA A100
* [GEMM SDK example](/examples/27_ampere_3xtf32_fast_accurate_tensorop_gemm/27_ampere_3xtf32_fast_accurate_tensorop_gemm.cu) (real)
* [COMPLEX GEMM SDK example](/examples/29_ampere_3xtf32_fast_accurate_tensorop_complex_gemm/29_ampere_3xtf32_fast_accurate_tensorop_complex_gemm.cu) (complex)
* [Implicit GEMM Convolution SDK example](/examples/28_ampere_3xtf32_fast_accurate_tensorop_fprop/ampere_3xtf32_fast_accurate_tensorop_fprop.cu)
* **Mainloop fusion for Convolution:** convolution with fused per-channel scale-bias-relu
* [Conv Fprop SDK example](/examples/25_ampere_fprop_mainloop_fusion/ampere_fprop_mainloop_fusion.cu)
* [Conv WGrad SDK example](/examples/26_ampere_wgrad_mainloop_fusion/ampere_wgrad_mainloop_fusion.cu)
* [cutlass::conv::device::ImplicitGemmConvolutionFusion](/include/cutlass/conv/device/implicit_gemm_convolution_fusion.h)
* **Grouped GEMM:** similar to batched GEMM with distinct problem size per group
* [SDK example](/examples/24_gemm_grouped) with performance comparison with Batched Strided GEMM
* [cutlass::gemm::device::GemmGrouped](/include/cutlass/gemm/device/gemm_grouped.h)
* [Implicit GEMM Convolution fusion](/examples/13_two_tensor_op_fusion/) supports staging 1st convolution's output accumulator in the shared memory on Turing. This allows more flexible warp tile sizes and less register pressure.
* Optimal performance using [**CUDA 11.5**](https://developer.nvidia.com/cuda-downloads)
* Updates from the community (thanks!)
* **Deprecation announcement:** CUTLASS plans to deprecate the following:
* Maxwell and Pascal GPU architectures
* Ubuntu 16.04
* CUDA 10.2
## [2.7.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.7.0) (2021-09-24)
* Mainloop fusion for GEMM: [summation over A or B](/examples/23_ampere_gemm_operand_reduction_fusion/ampere_gemm_operand_reduction_fusion.cu)
* [Strided DGRAD (optimized iterators)](/include/cutlass/conv/kernel/default_conv2d_dgrad.h)
* [Half-precision GELU_taylor activation functions](/include/cutlass/epilogue/thread/activation.h#L196)
* Use these when accumulation and epilogue compute types are all `cutlass::half_t`
* Tuning and bug fixes to [fused GEMM + GEMM example](/examples/13_two_tensor_op_fusion/)
* Support for smaller than 128b aligned Convolutions: [see examples](test/unit/conv/device/conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.cu#L272)
* Caching of results to accelerate Convolution [unit tests](test/unit/conv/device/cache_testbed_output.h)
* Can be enabled or disabled by running `cmake .. -DCUTLASS_TEST_ENABLE_CACHED_RESULTS=OFF`
* Corrections and bug fixes reported by the CUTLASS community
* Thank you for filing these issues!
## [2.6.1](https://github.com/NVIDIA/cutlass/releases/tag/v2.6.1) (2021-09-03)
* Arbitrary padding and striding for CUTLASS Strided DGRAD Convolution operator (Analytic Iterators)
* Tuning for GEMMs fused with partial reductions
* Corrections and bug fixes reported by the CUTLASS community
* Thank you for filing these issues!
## [2.6.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.6.0) (2021-07-22)
* Optimal performance when compiled with the [CUDA 11.4 Toolkit](https://developer.nvidia.com/cuda-toolkit)
* Adopt the new L2 prefetch feature in [cp.async](/include/cutlass/arch/memory.h) and [global load](/include/cutlass/arch/memory_sm80.h)
* Fused operators with GEMM and Convolution
* [Fused broadcast in epilogue](test/unit/gemm/device/gemm_with_broadcast_f16n_f16n_f16n_tensorop_f32_sm75.cu)
* [Fused partial reduction in epilogue](/test/unit/gemm/device/gemm_with_reduction_f16n_f16n_f16n_tensorop_f32_sm75.cu)
* 64b tensor strides and leading dimensions support for GEMMs
* Affine rank=2 matrix layouts
* Row stride and column stride for matrices using [cutlass::layout::AffineRank2](/include/cutlass/layout/matrix.h)
* Support [FP64 tensor core](/examples/18_ampere_fp64_tensorop_affine2_gemm/ampere_fp64_tensorop_affine2_gemm.cu) and SIMT GEMM.
* [Batched GEMV](/test/unit/gemm/device/gemv.cu) preview implementation
* [New strided Dgrad](test/unit/conv/device/conv2d_strided_dgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu) implementation
* Accelerates over previous implementation by cutting down redundant math by 4x
* Support using new `Dy` and `w` analytic iterators and existing `cutlass::conv::device::ImplicitGemmConvolution` interface
* Quaternion-valued GEMM and Convolution in single- and double-precision (targeting CUDA Cores)
* Updates to [quaternion.h](/include/cutlass/quaternion.h) and [functional.h](/include/cutlass/functional.h)
* SDK Example for [GEMM](/examples/21_quaternion_gemm/quaternion_gemm.cu) and [Convolution](/examples/22_quaternion_gemm/quaternion_conv.cu)
* [Unit tests for GEMM](/test/unit/gemm/device/simt_qgemm_nn_sm50.cu) and [Convolution](/test/unit/conv/device/conv2d_fprop_implicit_gemm_qf32nhwc_qf32nhwc_qf32nhwc_simt_f32_sm50.cu)
* Many improvements to the epilogue.
* Provide an [option](/include/cutlass/epilogue/threadblock/epilogue.h) to not fully unroll the epilogue to reduce the code size and improve the performance when using complicated elementwise operations
* Performance improvement for FP16 tensor core kernels
* Bug fixes
* Enhanced Clang support and the combination of Clang 13 and CUDA 11.4 can build and run kernels from Pascal and Ampere.
* Updated minimum CUDA Toolkit requirement to 10.2
* [CUDA 11.4 Toolkit](https://developer.nvidia.com/cuda-toolkit) recommended
* Corrections and bug fixes reported by the CUTLASS community
* Thank you for filing these issues!
## [2.5.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.5.0) (2021-02-26)
* Tensor reductions
* _m_-to-_n_ reductions of tensors with affine layout
* [Specializations](/test/unit/reduction/device/tensor_reduce_contiguous.cu) for reductions including contiguous dimension
* [Specializations](/test/unit/reduction/device/tensor_reduce_strided.cu) for reductions excluding contiguous dimension
* Custom reduction functors such as `cutlass::logical_and`
* Large tensor support, up to 2^63 elements (however, each dimension is limited to an extent of 2^31)
* Optimizations for 3-D convolution
* [Optimized tile iterators](include/cutlass/conv/threadblock/conv3d_fprop_activation_tile_access_iterator_optimized.h) using precomputed delta table for 3-D convolution
* Full coverage of [forward](test/unit/conv/device/conv3d_fprop_implicit_gemm_f16ndhwc_f16ndhwc_f32ndhwc_tensor_op_f32_sm80.cu) and [backwards](test/unit/conv/device/conv3d_dgrad_implicit_gemm_f16ndhwc_f16ndhwc_f32ndhwc_tensor_op_f32_sm80.cu) passes for 3D convolution
* [Fused Convolution+Convolution example](/examples/13_two_tensor_op_fusion/README.md)
* Corrections and bug fixes reported by the CUTLASS community
* Thank you for filing these issues!
## [2.4.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.4.0) (2020-11-19)
* Implicit GEMM convolution kernels supporting CUDA and Tensor Cores on NVIDIA GPUs
* Operators: forward (Fprop), backward data gradient (Dgrad), and backward weight gradient (Wgrad) convolution
* Data type: FP32, complex<FP32>, Tensor Float 32 (TF32), BFloat16 (BF16), Float16, Int4, Int8, Int32
* Spatial dimensions: 1-D, 2-D, and 3-D
* Layout: NHWC, NCxHWx
* Implicit GEMM convolution components:
* Global memory iterators supporting Fprop, Dgrad, and Wgrad
* `MmaMultistage` for implicit GEMM convolution for NVIDIA Ampere architecture
* `MmaPipeline` for implicit GEMM convolution for NVIDIA Volta and Turing architectures
* [Documentation](/media/docs/implicit_gemm_convolution.md) describing Implicit GEMM Convolution algorithm and implementation
## [2.3.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.3.0) (2020-09-23)
* [NVIDIA Ampere Architecture features](https://devblogs.nvidia.com/nvidia-ampere-architecture-in-depth/)
* [Sparse Tensor Core GEMM kernels](test/unit/gemm/device/gemm_f16n_f16n_f32t_tensor_op_f32_sparse_sm80.cu):
* Direct access to Sparse Tensor Cores and maximum performance via [`mma.sp.sync`](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-mma-and-friends)
* Fast SGEMM targeting GeForce RTX 30-series CUDA Cores
* Minor Features:
* [Activation functions](/include/cutlass/epilogue/thread/activation.h) such as [GeLU](/include/cutlass/epilogue/thread/linear_combination_gelu.h) and [Sigmoid](/include/cutlass/epilogue/thread/linear_combination_sigmoid.h)
* Small [matrix](/include/cutlass/matrix.h) and [quaternion](/include/cutlass/quaternion.h) template classes in device code
* [Floating-point constants](/include/cutlass/constants.h)
* NVIDIA Ampere GPU Architecture examples and documentation:
* [Tensor Float 32](/examples/14_ampere_tf32_tensorop_gemm/ampere_tf32_tensorop_gemm.cu) and
* [Sparse Tensor Cores](/examples/15_ampere_sparse_tensorop_gemm/ampere_sparse_tensorop_gemm.cu)
* Documentation added on CUTLASS [efficient row-major epilogue](/media/docs/gemm_api.md#efficient-epilogue)
## [2.2.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.2.0) (2020-06-08)
* [NVIDIA Ampere Architecture features](https://devblogs.nvidia.com/nvidia-ampere-architecture-in-depth/)
* Fast Tensor Core operations:
* Maximum performance via [`mma.sync`](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-mma-and-friends)
* Tensor Float 32, BFloat16, and double-precision data types
* Mixed integer data types (int8, int4, bin1)
* Asynchronous copy for deep software pipelines via [`cp.async`](https://docs.nvidia.com/cuda/parallel-thread-execution)
* Described in [GTC 2020 Webinar (SR 21745)](https://developer.nvidia.com/gtc/2020/video/s21745) (free registration required)
* Features:
* SDK examples showing GEMM fused with bias+relu and fused GEMM+GEMM
* Complex-valued GEMMs targeting NVIDIA Ampere Tensor Cores in double-precision and Tensor Float 32
* Gaussian complex GEMMs using 3m complex multiply algorithm
* Universal GEMM kernel supporting two batch modes and two algorithms for parallel reductions
* Policy updates:
* [CUDA 11 Toolkit](https://developer.nvidia.com/cuda-toolkit) needed to enable NVIDIA Ampere Architecture features
* Disabled F16C by default for compatibility - enable on cmake command line with `-DCUTLASS_ENABLE_F16C=ON`
## [2.1.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.1.0) (2020-04-06)
* BLAS-style host-side API added to [CUTLASS Library](/media/docs/quickstart.md#cutlass-library)
* API to launch compiled kernel instances for GEMM and planar complex GEMM
* Planar Complex GEMM kernels targeting Volta and Turing Tensor Cores
* Computes complex matrix products on matrices stored as disjoint real and imaginary parts
* [SDK Examples of Planar Complex GEMMs](/examples/10_planar_complex/planar_complex.cu)
* Minor enhancements and bug fixes
## [2.0.0](https://github.com/NVIDIA/cutlass/releases/tag/v2.0.0) (2019-11-19)
* Substantially refactored for
* Better performance, particularly for native Turing Tensor Cores
* Robust and durable templates spanning the design space
* Encapsulated functionality embodying modern C++11 programming techniques
* Optimized containers and data types for efficient, generic, portable device code
* Updates to:
* [Quick start guide](/media/docs/quickstart.md)
* [Documentation](/README.md#documentation)
* [Utilities](/media/docs/utilities.md)
* [CUTLASS Profiler](/media/docs/profiler.md)
* Native Turing Tensor Cores
* Efficient GEMM kernels targeting Turing Tensor Cores
* Mixed-precision floating point, 8-bit integer, 4-bit integer, and binarized operands
* Coverage of existing CUTLASS functionality
* GEMM kernels targeting CUDA and Tensor Cores in NVIDIA GPUs
* Volta Tensor Cores through native mma.sync and through WMMA API
* Optimizations such as parallel reductions, threadblock rasterization, and intra-threadblock reductions
* Batched GEMM operations
* Complex-valued GEMMs
* **Note: a host compiler supporting C++11 or greater is required.**
# CUTLASS 1.x
## [1.3.2](https://github.com/NVIDIA/cutlass/releases/tag/v1.3.2) (2019-07-09)
* Performance improvement for Volta Tensor Cores TN and TT layouts.
## [1.3.1](https://github.com/NVIDIA/cutlass/releases/tag/v1.3.1) (2019-04-09)
* Corrected NVRTC unit tests.
## [1.3.0](https://github.com/NVIDIA/cutlass/releases/tag/v1.3.0) (2019-03-20)
* Efficient GEMM kernel targeting Volta Tensor Cores via `mma.sync` instruction added in CUDA 10.1.
## [1.2.0](https://github.com/NVIDIA/cutlass/releases/tag/v1.2.0) (2018-10-26)
* Parallelized reductions across threadblocks ("Split-K")
* Improved IGEMM performance
* Batched strided WMMA GEMMs
## [1.1.0](https://github.com/NVIDIA/cutlass/releases/tag/v1.1.0) (2018-09-19)
* Turing Features
* WMMA GEMM targeting TensorCores - INT8, INT4, 1-bit
* Batched Strided GEMM
* Threadblock rasterization strategies
* Improved performance for adverse problem sizes and data layouts
* Extended CUTLASS Core components
* Tensor views support arbitrary matrix and tensor layouts
* Zip iterators for structuring multiple data streams
* Enhanced CUTLASS utilities
* Reference code for tensor operations in host and device code
* Added HostMatrix<> for simplified matrix creation
* Examples
* Basic GEMM, tensor views, CUTLASS utilities, batched GEMM, WMMA GEMM
## [1.0.1](https://github.com/NVIDIA/cutlass/releases/tag/v1.0.1) (2018-06-11)
* Intra-threadblock reduction added for small threadblock tile sizes
* sgemm_64x128x16, sgemm_128x128x16, sgemm_128x64x16, sgemm_128x32x16, sgemm_64x64x16, sgemm_64x32x16
* igemm_32x32x128
* GEMM _K_ residue handled during prologue prior to mainloop
* Replaced Google Test copy with submodule. Use `git submodule update --init --recursive`
## [1.0.0](https://github.com/NVIDIA/cutlass/commit/2028ebe120aab22bfd0b2baf8902d4c9627eb33f) (2018-05-16)
* Substantial rewrite to accommodate new architecture
* Kernels: SGEMM, DGEMM, IGEMM, HGEMM, WMMA GEMM
* Unit and performance tests
## [0.0.1](https://github.com/NVIDIA/cutlass/commit/d08ba8ac46e2fa3f745e070c390182edb56b2e91) (2017-12-04)
* Initial release
## Copyright
Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
| 26,247 | Markdown | 81.54088 | 427 | 0.770107 |
NVIDIA/warp/warp/native/cutlass/README.md | 
# CUTLASS 2.11
_CUTLASS 2.11 - November 2022_
CUTLASS is a collection of CUDA C++ template abstractions for implementing
high-performance matrix-multiplication (GEMM) and related computations at all levels
and scales within CUDA. It incorporates strategies for hierarchical decomposition and
data movement similar to those used to implement cuBLAS and cuDNN. CUTLASS decomposes
these "moving parts" into reusable, modular software components abstracted by C++ template
classes. These thread-wide, warp-wide, block-wide, and device-wide primitives can be specialized
and tuned via custom tiling sizes, data types, and other algorithmic policy. The
resulting flexibility simplifies their use as building blocks within custom kernels
and applications.
To support a wide variety of applications, CUTLASS provides extensive support for
mixed-precision computations, providing specialized data-movement and
multiply-accumulate abstractions for half-precision floating
point (FP16), BFloat16 (BF16), Tensor Float 32 (TF32),
single-precision floating point (FP32),
[FP32 emulation via tensor core instruction](/examples/27_ampere_3xtf32_fast_accurate_tensorop_gemm),
double-precision floating
point (FP64) types, integer data types (4b and 8b), and binary data types (1b).
CUTLASS demonstrates warp-synchronous matrix multiply operations
targeting the programmable, high-throughput _Tensor Cores_ implemented by
NVIDIA's Volta, Turing, and Ampere architectures.
CUTLASS implements high-performance Convolution via the implicit GEMM algorithm.
Implicit GEMM is the formulation of a convolution operation as a GEMM thereby taking advantage of
CUTLASS's modular GEMM pipeline.
This allows CUTLASS to build convolutions by reusing highly optimized warp-wide GEMM components and below.
See the [Quick Start Guide](/media/docs/quickstart.md) to get started quickly.
See the [functionality listing](/media/docs/functionality.md) for the list of operations
supported at each level of the execution model hierarchy.
# What's New in CUTLASS 2.11
CUTLASS 2.11 is an update to CUTLASS adding:
- Stream-K, which is a new general way to do split-K. It can not only improve performance, but can also significantly reduce the number of tile sizes that need to be profiled to find the best one.
- [Fused multi-head attention kernel](/examples/41_fused_multi_head_attention). It has two variants: one for fixed sequence lengths, and another for variable sequence lengths.
- [Dual GEMM](/examples/45_dual_gemm). It can run two GEMMs that share the same left input matrix in one kernel.
- Hopper improves [double precision matrix multiplication](/test/unit/gemm/device/gemm_f64n_f64t_f64t_tensor_op_f64_sm90.cu) by 2x compared to Ampere at iso-clocks. It is supported since CUDA 11.8.
- [BLAS3](/test/unit/gemm/device/hemm_cf64_cf64_cf64_tensor_op_f64_sm90.cu) functions with Hopper's new double precision matrix multiplication instructions.
- [ELL Block Sparse GEMM](/examples/43_ell_block_sparse_gemm).
- [Optimized Group Conv](/examples/42_ampere_tensorop_group_conv).
- [Optimized DepthWise Conv](/examples/46_depthwise_simt_conv2dfprop).
- [Scripts](/examples/44_multi_gemm_ir_and_codegen) to fuse multiple back-to-back GEMMs.
- [FP8 data type definition](/include/cutlass/float8.h) and [conversion routines](/include/cutlass/numeric_conversion.h#L1274-2115).
- Updates and bugfixes from the community (thanks!). Big shout out to Meta's [xFormers](https://github.com/facebookresearch/xformers).
- **Deprecation announcement:** CUTLASS plans to deprecate the following in the next major release:
- Maxwell and Pascal GPU architectures
- Ubuntu 16.04
- CUDA 10.2
**See the [CHANGELOG](CHANGELOG.md) for a detailed listing of releases and updates.**
# Performance
<p align="center"><img src=/media/images/cutlass-2.8-gemm-performance.png></p>
CUTLASS primitives are very efficient. When used to construct device-wide GEMM kernels,
they exhibit performance comparable to cuBLAS for scalar GEMM
computations. The above figure shows CUTLASS performance relative to cuBLAS
for large matrix dimensions on an [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/),
an [NVIDIA A2](https://www.nvidia.com/en-us/data-center/products/a2/),
an [NVIDIA TitanV](https://www.nvidia.com/en-us/titan/titan-v/),
and an [NVIDIA GeForce 2080 Ti](https://www.nvidia.com/en-us/geforce/graphics-cards/rtx-2080-ti/)
compiled with the [CUDA 11.5 Toolkit](https://developer.nvidia.com/cuda-downloads). Tensor Core operations are implemented using CUDA's
[mma instruction](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-mma).
<p align="center"><img src=/media/images/cutlass-2.9-implicit-gemm-performance.png></p>
When using CUTLASS building blocks to construct device-wide implicit gemm (Fprop, Dgrad, and Wgrad)
kernels, CUTLASS performance is also comparable to cuDNN when running Resnet-50 layers on an [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/)
as shown in the above figure. Tensor Core operations are still implemented using CUDA's
[mma instruction](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-mma).
# Compatibility
CUTLASS requires a C++11 host compiler and performs best when built with the [**CUDA 11.8 Toolkit**](https://developer.nvidia.com/cuda-toolkit).
It is also compatible with CUDA 11.x.
## Operating Systems
We have tested the following environments.
|**Operating System** | **Compiler** |
|-----------------|----------|
| Windows 10 | Microsoft Visual Studio 2015|
| | Microsoft Visual Studio 2017|
| | Microsoft Visual Studio 2019|
| Ubuntu 18.04 | GCC 7.5.0 |
| Ubuntu 20.04 | GCC 10.3.0 |
| Ubuntu 22.04 | GCC 11.2.0 |
Additionally, CUTLASS may be built with clang.
See [these instructions](media/docs/quickstart.md#clang) for more details.
## Hardware
CUTLASS runs successfully on the following NVIDIA GPUs, and it is expected to be efficient on
any Volta-, Turing-, or NVIDIA Ampere- architecture NVIDIA GPU.
|**GPU**|**CUDA Compute Capability**|**Minimum CUDA Toolkit**|**Minimum CUDA Toolkit Enabling Native Tensor Cores**|
|---|---|---|---|
|NVIDIA Tesla V100|7.0|9.2|10.1|
|NVIDIA TitanV|7.0|9.2|10.1|
|NVIDIA GeForce RTX 2080 TI, 2080, 2070|7.5|10.0|10.2|
|NVIDIA Tesla T4|7.5|10.0|10.2|
|NVIDIA A100|8.0|11.0|11.0|
|NVIDIA A10 |8.6|11.1|11.1|
|NVIDIA GeForce 3090|8.6|11.1|11.1|
|NVIDIA H100 PCIe|9.0|11.8|Double-precision: 11.8|
# Documentation
CUTLASS is described in the following documents and the accompanying
[Doxygen documentation](https://nvidia.github.io/cutlass).
- [Quick Start Guide](/media/docs/quickstart.md) - build and run CUTLASS
- [Functionality](/media/docs/functionality.md) - summarizes functionality available in CUTLASS
- [Efficient GEMM in CUDA](media/docs/efficient_gemm.md) - describes how GEMM kernels may be implemented efficiently in CUDA
- [GEMM API](media/docs/gemm_api.md) - describes the CUTLASS GEMM model and C++ template concepts
- [Implicit GEMM Convolution](media/docs/implicit_gemm_convolution.md) - describes 2-D and 3-D convolution in CUTLASS
- [Code Organization](media/docs/code_organization.md) - describes the organization and contents of the CUTLASS project
- [Terminology](media/docs/terminology.md) - describes terms used in the code
- [Programming Guidelines](media/docs/programming_guidelines.md) - guidelines for writing efficient modern CUDA C++
- [Fundamental types](media/docs/fundamental_types.md) - describes basic C++ classes used in CUTLASS to represent numeric quantities and arrays
- [Layouts](media/docs/layout.md) - describes layouts of matrices and tensors in memory
- [Tile Iterators](media/docs/tile_iterator_concept.md) - describes C++ concepts for iterating over tiles of matrices in memory
- [CUTLASS Profiler](media/docs/profiler.md) - command-line driven profiling application
- [CUTLASS Utilities](media/docs/utilities.md) - additional templates used to facilitate rapid development
# Resources
We have also described the structure of an efficient GEMM in our talk at the
[GPU Technology Conference 2018](http://on-demand.gputechconf.com/gtc/2018/presentation/s8854-cutlass-software-primitives-for-dense-linear-algebra-at-all-levels-and-scales-within-cuda.pdf).
- [CUTLASS: Software Primitives for Dense Linear Algebra at All Levels and Scales within CUDA](https://www.nvidia.com/en-us/on-demand/session/gtcsiliconvalley2018-s8854/)
- [Developing CUDA Kernels to Push Tensor Cores to the Absolute Limit on NVIDIA A100](https://www.nvidia.com/en-us/on-demand/session/gtcsj20-s21745/)
- [Accelerating Convolution with Tensor Cores in CUTLASS](https://www.nvidia.com/en-us/on-demand/session/gtcspring21-s31883/)
- [Accelerating Backward Data Gradient by Increasing Tensor Core Utilization in CUTLASS](https://www.nvidia.com/en-us/on-demand/session/gtcspring22-s41996/)
- [CUTLASS: Python API, Enhancements, and NVIDIA Hopper](https://www.nvidia.com/en-us/on-demand/session/gtcfall22-a41131/)
# Building CUTLASS
CUTLASS is a header-only template library and does not need to be built to be used by other
projects. Client applications should target CUTLASS's `include/` directory in their include
paths.
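As a minimal illustration of header-only usage, the sketch below (patterned after the `basic_gemm` SDK example) instantiates a single-precision GEMM with column-major operands; the device pointers and leading dimensions are assumed to be allocated and initialized by the caller, and error handling is omitted.
```c++
#include "cutlass/gemm/device/gemm.h"

// Single-precision GEMM with column-major A, B, and C. Tile sizes and the
// epilogue are left at the library defaults for this sketch.
using ColumnMajor = cutlass::layout::ColumnMajor;
using CutlassSgemm = cutlass::gemm::device::Gemm<float, ColumnMajor,   // A
                                                 float, ColumnMajor,   // B
                                                 float, ColumnMajor>;  // C

// Computes C = alpha * A * B + beta * C on device memory assumed to be
// allocated and initialized by the caller.
cutlass::Status run_sgemm(int M, int N, int K,
                          float alpha, float const *A, int lda,
                          float const *B, int ldb,
                          float beta, float *C, int ldc) {
  CutlassSgemm gemm_op;
  CutlassSgemm::Arguments args({M, N, K},      // GEMM problem dimensions
                               {A, lda},       // TensorRef to A
                               {B, ldb},       // TensorRef to B
                               {C, ldc},       // TensorRef to C (source)
                               {C, ldc},       // TensorRef to D (destination)
                               {alpha, beta}); // epilogue scalars
  return gemm_op(args);                        // launches on the default CUDA stream
}
```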
CUTLASS unit tests, examples, and utilities can be built with CMake starting with version 3.12.
Make sure the `CUDACXX` environment variable points to NVCC in the CUDA Toolkit installed
on your system.
```bash
$ export CUDACXX=${CUDA_INSTALL_PATH}/bin/nvcc
```
Create a build directory within the CUTLASS project, then run CMake. By default CUTLASS will build kernels
for CUDA architecture versions 5.0, 6.0, 6.1, 7.0, 7.5, 8.0, and 8.6. To reduce compile time you can specify
the architectures to build CUTLASS for by changing the CMake configuration setting
`CUTLASS_NVCC_ARCHS`.
```bash
$ mkdir build && cd build
$ cmake .. -DCUTLASS_NVCC_ARCHS=80 # compiles for NVIDIA's Ampere Architecture
```
From the `build/` directory, compile and run the CUTLASS unit tests by building the target `test_unit` with make.
The unit tests are organized as several binaries mirroring the top-level namespaces of CUTLASS,
and they may be executed in parallel via make's `-j` command line argument.
```bash
$ make test_unit -j
...
...
...
[----------] Global test environment tear-down
[==========] 946 tests from 57 test cases ran. (10812 ms total)
[ PASSED ] 946 tests.
```
All tests should pass on supported platforms, though the exact number of tests may vary over time.
# Project Structure
CUTLASS is arranged as a header-only library along with Utilities, Tools, Examples, and unit tests.
[Doxygen documentation](https://nvidia.github.io/cutlass) provides a complete list of files, classes,
and template concepts defined in the CUTLASS project.
A detailed explanation of the source code organization may be found in the
[CUTLASS documentation](media/docs/code_organization.md), but several main components are summarized below.
## CUTLASS Template Library
```
include/ # client applications should target this directory in their build's include paths
cutlass/ # CUDA Templates for Linear Algebra Subroutines and Solvers - headers only
arch/ # direct exposure of architecture features (including instruction-level GEMMs)
conv/ # code specialized for convolution
epilogue/ # code specialized for the epilogue of gemm/convolution
gemm/ # code specialized for general matrix product computations
layout/ # layout definitions for matrices, tensors, and other mathematical objects in memory
platform/ # CUDA-capable Standard Library components
reduction/ # bandwidth-limited reduction kernels that do not fit the "gemm" model
thread/ # simt code that can be performed within a CUDA thread
transform/ # code specialized for layout, type, and domain transformations
* # core vocabulary types, containers, and basic numeric operations
```
### CUTLASS SDK Examples
[CUTLASS SDK examples](/examples) apply CUTLASS templates to implement basic computations.
### Tools
```
tools/
library/ # CUTLASS Instance Library - contains instantiations of all supported CUTLASS templates
include/
cutlass/
library/
profiler/ # CUTLASS Profiler - command-line utility for executing operations in the
# CUTLASS Library
util/ # CUTLASS Utilities - contains numerous helper classes for
    include/                # managing tensors in device memory, reference
cutlass/ # implementations for GEMM, random initialization
util/ # of tensors, and I/O.
```
### Test
The `test/unit/` directory consists of unit tests implemented with Google Test that demonstrate
basic usage of Core API components and complete tests of the CUTLASS GEMM computations.
Instructions for building and running the unit tests are described in the [Quickstart guide](media/docs/quickstart.md).
# Performance Profiling
The `tools/profiler/` directory contains a command-line utility for launching each of the GEMM kernels.
It can be built as follows:
```bash
$ make cutlass_profiler -j16
```
## Building all GEMM and Convolution kernels (_long_ build times)
By default, only one tile size is instantiated for each data type, math instruction, and layout.
To instantiate all, set the following CMake variable when running CMake from an empty `build/` directory.
Beware, this results in *thousands* of kernels and long build times.
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS=75 -DCUTLASS_LIBRARY_KERNELS=all
...
$ make cutlass_profiler -j16
```
## Building a subset of GEMM and Convolution kernels (_reduced_ build times)
To compile strictly one kernel or a small set of kernels, a comma-delimited list of kernel names with
wildcard characters may be used to reduce the set of kernels. The following examples show building exactly one
or a subset of kernels for NVIDIA Ampere and Turing architecture:
### Building a subset Tensor Core GEMM kernels
To compile a subset of Tensor Core GEMM kernels with FP32 accumulation and FP16 input targeting the NVIDIA Ampere and Turing architectures,
use the following CMake command line:
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS='75;80' -DCUTLASS_LIBRARY_KERNELS=cutlass_tensorop_s*gemm_f16_*_nt_align8
...
$ make cutlass_profiler -j16
```
Example command line for profiling a subset of Tensor Core GEMM kernels is as follows:
```bash
./tools/profiler/cutlass_profiler --kernels=cutlass_tensorop_s*gemm_f16_*_nt_align8 --m=3456 --n=4096 --k=4096
...
=============================
Problem ID: 1
Provider: CUTLASS
OperationKind: gemm
Operation: cutlass_tensorop_s1688gemm_f16_256x128_32x2_nt_align8
Status: Success
Verification: ON
Disposition: Passed
reference_device: Passed
cuBLAS: Passed
Arguments: --gemm_kind=universal --m=3456 --n=4096 --k=4096 --A=f16:column --B=f16:row --C=f32:column --alpha=1 \
--beta=0 --split_k_slices=1 --batch_count=1 --op_class=tensorop --accum=f32 --cta_m=256 --cta_n=128 \
--cta_k=32 --stages=2 --warps_m=4 --warps_n=2 --warps_k=1 --inst_m=16 --inst_n=8 --inst_k=8 --min_cc=75 \
--max_cc=1024
Bytes: 118489088 bytes
FLOPs: 115992428544 flops
Runtime: 1.55948 ms
Memory: 70.7616 GiB/s
Math: 74378.8 GFLOP/s
=============================
...
```
### Building one CUDA Core GEMM kernel
To compile one SGEMM kernel targeting the NVIDIA Ampere and Turing architectures, use the following CMake command line:
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS='75;80' -DCUTLASS_LIBRARY_KERNELS=cutlass_simt_sgemm_128x128_8x2_nn_align1
...
$ make cutlass_profiler -j16
```
Example command line for profiling a single SGEMM CUDA kernel is as follows:
```bash
$ ./tools/profiler/cutlass_profiler --kernels=sgemm --m=3456 --n=4096 --k=4096
=============================
Problem ID: 1
Provider: CUTLASS
OperationKind: gemm
Operation: cutlass_simt_sgemm_128x128_8x2_nn_align1
Status: Success
Verification: ON
Disposition: Passed
cuBLAS: Passed
Arguments: --m=3456 --n=4096 --k=4096 --A=f32:column --B=f32:column --C=f32:column --alpha=1 --beta=0 --split_k_slices=1 \
--batch_count=1 --op_class=simt --accum=f32 --cta_m=128 --cta_n=128 --cta_k=8 --stages=2 --warps_m=4 \
--warps_n=2 --warps_k=1 --inst_m=1 --inst_n=1 --inst_k=1 --min_cc=50 --max_cc=1024
Bytes: 180355072 bytes
FLOPs: 115992428544 flops
Runtime: 6.73655 ms
Memory: 24.934 GiB/s
Math: 17218.4 GFLOP/s
=============================
```
### Building a subset of Tensor Core Convolution kernels
To compile a subset of Tensor Core convolution kernels implementing forward propagation (fprop) with FP32 accumulation
and FP16 input targeting the NVIDIA Ampere and Turing architectures, use the following CMake command line:
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS='75;80' -DCUTLASS_LIBRARY_KERNELS=cutlass_tensorop_s*fprop_optimized_f16
...
$ make cutlass_profiler -j16
```
Example command line for profiling a subset of Tensor Core convolution kernels is as follows:
```bash
$ ./tools/profiler/cutlass_profiler --kernels=cutlass_tensorop_s*fprop_optimized_f16 --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3
...
=============================
Problem ID: 1
Provider: CUTLASS
OperationKind: conv2d
Operation: cutlass_tensorop_s16816fprop_optimized_f16_128x128_32x5_nhwc
Status: Success
Verification: ON
Disposition: Passed
reference_device: Passed
Arguments: --conv_kind=fprop --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 --p=224 --q=224 --pad_h=1 --pad_w=1 \
--stride_h=1 --stride_w=1 --dilation_h=1 --dilation_w=1 --Activation=f16:nhwc --Filter=f16:nhwc --Output=f32:nhwc \
--conv_mode=cross --iterator_algorithm=optimized --alpha=1 --beta=0 --split_k_mode=serial --split_k_slices=1 \
--eq_gemm_provider=none --op_class=tensorop --accum=f32 --cta_m=128 --cta_n=128 --cta_k=32 --stages=5 \
--warps_m=2 --warps_n=2 --warps_k=1 --inst_m=16 --inst_n=8 --inst_k=16 --min_cc=80 --max_cc=1024
Bytes: 1130659840 bytes
FLOPs: 118482796544 flops
Runtime: 0.711496 ms
Memory: 1479.99 GiB/s
Math: 166526 GFLOP/s
=============================
...
```
### Building one Convolution CUDA kernel
To compile and run one CUDA Core convolution kernel implementing forward propagation (fprop) with FP32 accumulation
and FP32 input targeting the NVIDIA Ampere and Turing architectures, use the following CMake command line:
```bash
$ cmake .. -DCUTLASS_NVCC_ARCHS='75;80' -DCUTLASS_LIBRARY_KERNELS=cutlass_simt_sfprop_optimized_128x128_8x2_nhwc
...
$ make cutlass_profiler -j16
```
Example command line for profiling one CUDA Core convolution kernel:
```bash
$ ./tools/profiler/cutlass_profiler --kernels=cutlass_simt_sfprop_optimized_128x128_8x2_nhwc --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3
=============================
Problem ID: 1
Provider: CUTLASS
OperationKind: conv2d
Operation: cutlass_simt_sfprop_optimized_128x128_8x2_nhwc
Status: Success
Verification: ON
Disposition: Passed
reference_device: Passed
Arguments: --conv_kind=fprop --n=8 --h=224 --w=224 --c=128 --k=128 --r=3 --s=3 --p=224 --q=224 --pad_h=1 --pad_w=1 \
--stride_h=1 --stride_w=1 --dilation_h=1 --dilation_w=1 --Activation=f32:nhwc --Filter=f32:nhwc --Output=f32:nhwc \
--conv_mode=cross --iterator_algorithm=optimized --alpha=1 --beta=0 --split_k_mode=serial --split_k_slices=1 \
--eq_gemm_provider=none --op_class=simt --accum=f32 --cta_m=128 --cta_n=128 --cta_k=8 --stages=2 --warps_m=4 \
--warps_n=2 --warps_k=1 --inst_m=1 --inst_n=1 --inst_k=1 --min_cc=50 --max_cc=1024
Bytes: 2055798784 bytes
FLOPs: 118482796544 flops
Runtime: 7.34266 ms
Memory: 260.752 GiB/s
Math: 16136.2 GFLOP/s
=============================
```
## More Details on Compiling CUTLASS Kernels and CUTLASS Profiler
- Please follow the links for more CMake examples on selectively compiling CUTLASS kernels:
- [GEMM CMake Examples](media/docs/quickstart.md#gemm-cmake-examples)
- [Implicit GEMM convolution CMake Examples](media/docs/quickstart.md#convolution-cmake-examples)
- [Further details about the CUTLASS Profiler are described here.](media/docs/profiler.md)
# About
CUTLASS is released by NVIDIA Corporation as Open Source software under the
[3-clause "New" BSD license](LICENSE.txt).
# Contributors
The official list of CUTLASS developers and contributors is available here: [CONTRIBUTORS](CONTRIBUTORS.md).
# Copyright
Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
```
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
| 23,092 | Markdown | 44.015594 | 199 | 0.712151 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/host_reorder.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief reorder data from the host side
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/tensor_view.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/gemm.h"
namespace cutlass {
/// This is needed for the interleaved integer tensor core kernels. The purpose
/// is to skip the shared memory part in the epilogue.
template <int Interleaved, typename Element, typename Layout>
void reorder_column(TensorRef<Element, Layout> dest,
TensorRef<Element, Layout> src,
cutlass::gemm::GemmCoord problem_size) {
const int InstructionShapeCol = 8;
// 4 threads per Quad
const int ElementsPerThread = InstructionShapeCol / 4;
// 4 threads per Quad
const int ReorderedElementsPerThread =
Interleaved / 4;
for (int n = 0; n < problem_size.n(); n++) {
for (int k = 0; k < problem_size.k(); k++) {
dest.at({k, (n / Interleaved) * Interleaved +
((n % ReorderedElementsPerThread) / ElementsPerThread) *
InstructionShapeCol +
((n % Interleaved) / ReorderedElementsPerThread) *
ElementsPerThread +
(n % ElementsPerThread)}) = src.at({k, n});
}
}
}
template <int ColumnInterleaved, int LayoutInterleaved = ColumnInterleaved, typename Element, typename Layout>
void reorder_convK(TensorRef<Element, Layout> dest,
TensorRef<Element, Layout> src,
cutlass::gemm::GemmCoord problem_size) {
TensorRef<Element, layout::RowMajorInterleaved<LayoutInterleaved>> mappedDest(dest.data(), dest.stride(0));
TensorRef<Element, layout::RowMajorInterleaved<LayoutInterleaved>> mappedSrc(src.data(), src.stride(0));
reorder_column<ColumnInterleaved>(
mappedDest, mappedSrc, problem_size);
}
/// This is needed for the sparse tensor core kernels. The purpose
/// is to use ldmatrix to load from shared memory to the register file.
template <typename Element, typename LayoutDest, typename LayoutSrc>
void reorder_meta(TensorRef<Element, LayoutDest> dest,
TensorRef<Element, LayoutSrc> src,
cutlass::gemm::GemmCoord problem_size) {
for (int m = 0; m < problem_size.m(); m++) {
for (int k = 0; k < problem_size.k(); k++) {
// First reorder the rows.
int group = (sizeof(Element) == 2) ? 32 : 16;
int interweave = (sizeof(Element) == 2) ? 4 : 2;
int dest_row = m / group * group + (m % 8) * interweave + (m % group) / 8;
int dest_col = k;
// Next swizzle the 2x2 blocks from Z to N.
if (((dest_row % 2) == 0) && ((dest_col % 2) == 1)) {
++dest_row;
--dest_col;
} else if (((dest_row % 2) == 1) && ((dest_col % 2) == 0)) {
--dest_row;
++dest_col;
}
dest.at({dest_row, dest_col}) = src.at({m, k});
}
}
}
} // namespace cutlass
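/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (an addition for exposition, not part of the original header):
// reorder a column-interleaved B operand on the host before copying it to the device for an
// interleaved integer tensor-core GEMM. The Element, Layout, and Interleaved parameters are
// assumptions that must match the kernel actually being launched.
template <int Interleaved, typename Element, typename Layout>
void example_reorder_interleaved_b(cutlass::gemm::GemmCoord problem_size) {
  cutlass::HostTensor<Element, Layout> tensor_B(problem_size.kn());
  cutlass::HostTensor<Element, Layout> tensor_B_reordered(problem_size.kn());
  // ... fill tensor_B.host_view() with the canonical (non-reordered) operand ...
  cutlass::reorder_column<Interleaved>(
      tensor_B_reordered.host_ref(), tensor_B.host_ref(), problem_size);
  // Copy the reordered operand to device memory for use by the GEMM kernel.
  tensor_B_reordered.sync_device();
}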
| 4,821 | C | 42.053571 | 111 | 0.641361 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/exceptions.h | /******************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
/**
* \file
* \brief C++ exception semantics for CUDA error codes
*/
#include <cuda_runtime.h>
#include <iosfwd>
#include <stdexcept>
#include "cutlass/platform/platform.h"
namespace cutlass {
/// C++ exception wrapper for CUDA \p cudaError_t
class cuda_exception : public std::exception {
public:
/// Constructor
cuda_exception(const char* msg = "", cudaError_t err = cudaErrorUnknown) : msg(msg), err(err) {}
/// Returns the underlying CUDA \p cudaError_t
cudaError_t cudaError() const { return err; }
protected:
/// Explanatory string
const char* msg;
/// Underlying CUDA \p cudaError_t
cudaError_t err;
};
/// Writes a cuda_exception instance to an output stream
inline std::ostream& operator<<(std::ostream& out, cuda_exception const& e) {
return out << e.what() << ": " << cudaGetErrorString(e.cudaError());
}
} // namespace cutlass
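/// Illustrative usage sketch (an addition for exposition, not part of the original header):
/// wrap a CUDA runtime call and surface failures as cutlass::cuda_exception. The helper name
/// and error message are arbitrary example choices.
inline void *example_checked_device_alloc(size_t bytes) {
  void *ptr = nullptr;
  cudaError_t err = cudaMalloc(&ptr, bytes);
  if (err != cudaSuccess) {
    // A catch handler can log the exception via the operator<< defined above,
    // e.g. std::cerr << e << std::endl;
    throw cutlass::cuda_exception("cudaMalloc() failed", err);
  }
  return ptr;
}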
| 2,674 | C | 37.214285 | 98 | 0.698953 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/tensor_view_io.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/core_io.h"
#include "cutlass/tensor_view.h"
#include "cutlass/tensor_view_planar_complex.h"
#include "cutlass/complex.h"
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Helper to write the least significant rank of a TensorView
template <
typename Element,
typename Layout
>
inline std::ostream & TensorView_WriteLeastSignificantRank(
std::ostream& out,
TensorView<Element, Layout> const& view,
Coord<Layout::kRank> const &start_coord,
int rank,
std::streamsize width) {
for (int idx = 0; idx < view.extent(rank); ++idx) {
Coord<Layout::kRank> coord(start_coord);
coord[rank] = idx;
if (idx) {
out.width(0);
out << ", ";
}
if (idx || coord) {
out.width(width);
}
out << ScalarIO<Element>(view.at(coord));
}
return out;
}
/// Helper to write a rank of a TensorView
template <
typename Element,
typename Layout
>
inline std::ostream & TensorView_WriteRank(
std::ostream& out,
TensorView<Element, Layout> const& view,
Coord<Layout::kRank> const &start_coord,
int rank,
std::streamsize width) {
// If called on the least significant rank, write the result as a row
if (rank + 1 == Layout::kRank) {
return TensorView_WriteLeastSignificantRank(out, view, start_coord, rank, width);
}
// Otherwise, write a sequence of rows and newlines
for (int idx = 0; idx < view.extent(rank); ++idx) {
Coord<Layout::kRank> coord(start_coord);
coord[rank] = idx;
if (rank + 2 == Layout::kRank) {
      // Write least significant ranks as a matrix with rows delimited by "\n"
out << (idx ? ",\n" : "");
TensorView_WriteLeastSignificantRank(out, view, coord, rank + 1, width);
}
else {
// Higher ranks are separated by newlines
out << (idx ? ",\n\n" : "");
TensorView_WriteRank(out, view, coord, rank + 1, width);
}
}
return out;
}
/// Helper to write the least significant rank of a TensorView
template <
typename Element,
typename Layout
>
inline std::ostream & TensorViewPlanarComplex_WriteLeastSignificantRank(
std::ostream& out,
TensorViewPlanarComplex<Element, Layout> const& view,
Coord<Layout::kRank> const &start_coord,
int rank,
std::streamsize width) {
for (int idx = 0; idx < view.extent(rank); ++idx) {
Coord<Layout::kRank> coord(start_coord);
coord[rank] = idx;
if (idx) {
out.width(0);
out << ", ";
}
if (idx || coord) {
out.width(width);
}
complex<Element> x = view.at(coord);
out << x;
}
return out;
}
/// Helper to write a rank of a TensorView
template <
typename Element,
typename Layout
>
inline std::ostream & TensorViewPlanarComplex_WriteRank(
std::ostream& out,
TensorViewPlanarComplex<Element, Layout> const& view,
Coord<Layout::kRank> const &start_coord,
int rank,
std::streamsize width) {
// If called on the least significant rank, write the result as a row
if (rank + 1 == Layout::kRank) {
return TensorViewPlanarComplex_WriteLeastSignificantRank(out, view, start_coord, rank, width);
}
// Otherwise, write a sequence of rows and newlines
for (int idx = 0; idx < view.extent(rank); ++idx) {
Coord<Layout::kRank> coord(start_coord);
coord[rank] = idx;
if (rank + 2 == Layout::kRank) {
      // Write least significant ranks as a matrix with rows delimited by ";\n"
out << (idx ? ";\n" : "");
TensorViewPlanarComplex_WriteLeastSignificantRank(out, view, coord, rank + 1, width);
}
else {
// Higher ranks are separated by newlines
out << (idx ? "\n" : "");
TensorViewPlanarComplex_WriteRank(out, view, coord, rank + 1, width);
}
}
return out;
}
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Prints human-readable representation of a TensorView to an ostream
template <
typename Element,
typename Layout
>
inline std::ostream& TensorViewWrite(
std::ostream& out,
TensorView<Element, Layout> const& view) {
// Prints a TensorView according to the following conventions:
// - least significant rank is printed as rows separated by ";\n"
// - all greater ranks are delimited with newlines
//
// The result is effectively a whitespace-delimited series of 2D matrices.
return detail::TensorView_WriteRank(out, view, Coord<Layout::kRank>(), 0, out.width());
}
/// Prints human-readable representation of a TensorView to an ostream
template <
typename Element,
typename Layout
>
inline std::ostream& operator<<(
std::ostream& out,
TensorView<Element, Layout> const& view) {
// Prints a TensorView according to the following conventions:
// - least significant rank is printed as rows separated by ";\n"
// - all greater ranks are delimited with newlines
//
// The result is effectively a whitespace-delimited series of 2D matrices.
return TensorViewWrite(out, view);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Prints human-readable representation of a TensorView to an ostream
template <
typename Element,
typename Layout
>
inline std::ostream& TensorViewWrite(
std::ostream& out,
TensorViewPlanarComplex<Element, Layout> const& view) {
// Prints a TensorView according to the following conventions:
// - least significant rank is printed as rows separated by ";\n"
// - all greater ranks are delimited with newlines
//
// The result is effectively a whitespace-delimited series of 2D matrices.
return detail::TensorViewPlanarComplex_WriteRank(out, view, Coord<Layout::kRank>(), 0, out.width());
}
/// Prints human-readable representation of a TensorView to an ostream
template <
typename Element,
typename Layout
>
inline std::ostream& operator<<(
std::ostream& out,
TensorViewPlanarComplex<Element, Layout> const& view) {
// Prints a TensorView according to the following conventions:
// - least significant rank is printed as rows separated by ";\n"
// - all greater ranks are delimited with newlines
//
// The result is effectively a whitespace-delimited series of 2D matrices.
return TensorViewWrite(out, view);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
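///////////////////////////////////////////////////////////////////////////////////////////////////
/// Illustrative usage sketch (an addition for exposition, not part of the original header):
/// stream any host-side TensorView using the operator<< defined above. The element type,
/// layout, and destination stream are supplied by the caller.
template <typename Element, typename Layout>
void example_print_view(std::ostream &out, cutlass::TensorView<Element, Layout> const &view) {
  // The least significant rank is written as a row and higher ranks are separated by
  // newlines, so a rank-2 view prints as a conventional matrix.
  out.width(10);       // per-element field width picked up by TensorViewWrite
  out << view << "\n";
}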
| 8,285 | C | 30.505703 | 102 | 0.646832 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/distribution.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
/*! \file
\brief This header contains a class to parametrize a statistical distribution function.
*/
#include <ostream>
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Distribution type
struct Distribution {
/// Variant types
enum Kind { Invalid, Uniform, Gaussian, Identity, Sequential, AllZeros, AllOnes };
/// Distribution state
union {
/// Uniform distribution
struct {
double min;
double max;
} uniform;
/// Gaussian distribution
struct {
double mean;
double stddev;
} gaussian;
/// Elements are linear combination of row and column index
struct {
double start;
double delta;
} sequential;
};
/// Active variant kind
Kind kind;
/// Random values are cast to integer after scaling by this power of two
int int_scale;
//
// Methods
//
Distribution() : kind(Invalid), int_scale(0) {}
/// Configures distribution as uniform random
Distribution &set_uniform(double _min, double _max, int _int_scale = 0) {
kind = Uniform;
uniform.min = _min;
uniform.max = _max;
int_scale = _int_scale;
return *this;
}
/// Configures distribution as Gaussian distribution
Distribution &set_gaussian(double _mean, double _stddev, int _int_scale = 0) {
kind = Gaussian;
gaussian.mean = _mean;
gaussian.stddev = _stddev;
int_scale = _int_scale;
return *this;
}
/// Sets identity
Distribution &set_identity() {
kind = Identity;
return *this;
}
/// Sets sequential
Distribution &set_sequential(double start, double delta, int _int_scale = 0) {
kind = Sequential;
sequential.start = start;
sequential.delta = delta;
int_scale = _int_scale;
return *this;
}
};
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Prints a Distribution to ostream
inline std::ostream &operator<<(std::ostream &out, cutlass::Distribution const &dist) {
switch (dist.kind) {
case cutlass::Distribution::Uniform:
out << "uniform, min: " << dist.uniform.min << ", max: " << dist.uniform.max;
break;
case cutlass::Distribution::Gaussian:
out << "gaussian, mean: " << dist.gaussian.mean << ", stddev: " << dist.gaussian.stddev;
break;
case cutlass::Distribution::Identity:
out << "identity";
break;
case cutlass::Distribution::Sequential:
out << "sequential";
break;
default:
out << "unknown";
}
out << ", int_scale: " << dist.int_scale;
return out;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
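/// Illustrative usage sketch (an addition for exposition, not part of the original header):
/// configure a uniform distribution over [-4, 4] and log it. The bounds are arbitrary example
/// values; int_scale stays at 0, i.e. no cast-to-integer scaling.
inline void example_log_distribution(std::ostream &out) {
  cutlass::Distribution dist;
  dist.set_uniform(-4.0, 4.0);
  out << "Initializing tensor with distribution: " << dist << "\n";
}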
| 4,597 | C | 30.930555 | 100 | 0.611486 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/device_nhwc_pooling.h | /******************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
/**
* \file
 * \brief CUDA kernels to do avg/max pooling on a device memory tensor with NHWC layout.
*/
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/tensor_ref.h"
#include "device_utils.h"
#include <float.h>
namespace cutlass {
/** \brief interface to do avg/max pooling on a device memory tensor with NHWC layout.
* \tparam T: data type
*/
template <typename T>
void pooling_nhwc(cutlass::Tensor4DCoord input_tensor_size,
cutlass::Tensor4DCoord filter_tensor_size,
cutlass::Tensor4DCoord output_tensor_size,
cutlass::MatrixCoord padding,
cutlass::MatrixCoord stride,
TensorRef<T, layout::TensorNHWC> ref_input,
TensorRef<T, layout::TensorNHWC> ref_output,
int poolingType, //0 for avg pooling ; 1 for max pooling
cudaStream_t stream);
/** Computes the output size of pooling along one spatial dimension
*/
inline int getOutputSize(int H_W, int padding, int kernel_size, int stride)
{
return (H_W + 2 * padding - kernel_size) / stride + 1;
}
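// For example (illustrative numbers only): H_W = 224, padding = 0, kernel_size = 2, stride = 2
// yields (224 + 2*0 - 2) / 2 + 1 = 112 output positions along that dimension.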
/**
* input is [N, H, W, C]
* assume stride == kernel_size
* output_h = (H + 2*padding_H - kernel_H)/stride_H
* output_w = (W + 2*padding_W - kernel_W)/stride_W
* output is [N, output_h, output_w, C]
* grid(N, output_h, output_w)
* block(min(C, 256)) :
 * each block deals with C elements of output and each thread deals with ((C + 255)/256) elements of output
*/
template<typename T, bool IS_AVG_POOLING>
__global__ void pooling_nhwc_element1_kernel(T* output,
const T* input,
const int N,
const int H,
const int W,
const int C,
const int output_H,
const int output_W,
const int kernel_H,
const int kernel_W,
const int stride_H,
const int stride_W,
const int padding_H,
const int padding_W)
{
const int tid = threadIdx.x;
const int n_idx = blockIdx.x;
const int output_h_idx = blockIdx.y;
const int output_w_idx = blockIdx.z;
int h_start_idx = output_h_idx * stride_H - padding_H;
int h_end_idx = h_start_idx + kernel_H;
h_start_idx = (h_start_idx < 0) ? 0 : h_start_idx;
h_end_idx = h_end_idx > H ? H : h_end_idx;
int w_start_idx = output_w_idx * stride_W - padding_W;
int w_end_idx = w_start_idx + kernel_W;
w_start_idx = (w_start_idx < 0) ? 0 : w_start_idx;
w_end_idx = w_end_idx > W ? W : w_end_idx;
input += n_idx * H * W * C;
output += ((n_idx * output_H + output_h_idx) * output_W + output_w_idx) * C;
const int kernel_size2 = kernel_H * kernel_W;
for (int c_idx = tid; c_idx < C; c_idx += blockDim.x) {
float pooling;
if (IS_AVG_POOLING){
pooling = 0.0f;
}
else{
pooling = -FLT_MAX;
}
for (int h = h_start_idx; h < h_end_idx; h++) {
for (int w = w_start_idx; w < w_end_idx; w++) {
const int idx = (h * W + w) * C;
const float tmp = static_cast<float>(input[idx + c_idx]);
if (IS_AVG_POOLING){
pooling = pooling + tmp;
}
else{
pooling = pooling > tmp ? pooling : tmp;
}
}
}
T output_val;
if (IS_AVG_POOLING){
output_val = T(pooling/kernel_size2);
}
else{
output_val = T(pooling);
}
output[c_idx] = output_val;
}
}
template<typename T2, typename T, bool IS_AVG_POOLING>
__global__ void pooling_nhwc_element2_kernel(T2* output,
const T2* input,
const int N,
const int H,
const int W,
const int C,
const int output_H,
const int output_W,
const int kernel_H,
const int kernel_W,
const int stride_H,
const int stride_W,
const int padding_H,
const int padding_W)
{
const int tid = threadIdx.x;
const int n_idx = blockIdx.x;
const int output_h_idx = blockIdx.y;
const int output_w_idx = blockIdx.z;
int h_start_idx = output_h_idx * stride_H - padding_H;
int h_end_idx = h_start_idx + kernel_H;
h_start_idx = (h_start_idx < 0) ? 0 : h_start_idx;
h_end_idx = h_end_idx > H ? H : h_end_idx;
int w_start_idx = output_w_idx * stride_W - padding_W;
int w_end_idx = w_start_idx + kernel_W;
w_start_idx = (w_start_idx < 0) ? 0 : w_start_idx;
w_end_idx = w_end_idx > W ? W : w_end_idx;
input += n_idx * H * W * C;
output += ((n_idx * output_H + output_h_idx) * output_W + output_w_idx) * C;
const int kernel_size2 = kernel_H * kernel_W;
for (int c_idx = tid; c_idx < C; c_idx += blockDim.x) {
float2 pooling;
if (IS_AVG_POOLING) {
pooling = {0.0f, 0.0f};
}
else {
pooling = {-FLT_MAX, -FLT_MAX};
}
for (int h = h_start_idx; h < h_end_idx; h++) {
for (int w = w_start_idx; w < w_end_idx; w++) {
const int idx = (h * W + w) * C;
const T2 tmp = input[idx + c_idx];
const float2 tmp_flt2 = {static_cast<float>(tmp.x), static_cast<float>(tmp.y)};
if (IS_AVG_POOLING) {
pooling.x += tmp_flt2.x;
pooling.y += tmp_flt2.y;
}
else {
pooling.x = pooling.x > tmp_flt2.x ? pooling.x : tmp_flt2.x;
pooling.y = pooling.y > tmp_flt2.y ? pooling.y : tmp_flt2.y;
}
}
}
T2 output_val;
if (IS_AVG_POOLING) {
output_val.x = T(pooling.x/kernel_size2);
output_val.y = T(pooling.y/kernel_size2);
}
else {
output_val.x = T(pooling.x);
output_val.y = T(pooling.y);
}
output[c_idx] = output_val;
}
}
/**
* output [N, 1, 1, C]
* input [N, H, W, C]
* grid(C, N)
 * block(block_size) -- each thread deals with H*W/block_size elements;
*/
template<typename T, bool IS_AVG_POOLING>
__global__ void pooling_nxhTo1x1_element1_kernel(
T* output, const T* input, const int N, const int HW, const int C)
{
const int c_idx = blockIdx.x;
const int n_idx = blockIdx.y;
float pooling[1];
if (IS_AVG_POOLING) {
pooling[0] = 0.0f;
}
else {
pooling[0] = -FLT_MAX;
}
const size_t input_offset = n_idx * HW * C + c_idx;
input += input_offset;
const size_t output_offset = n_idx * C + c_idx;
output += output_offset;
int tid = threadIdx.x;
for (int index = tid; index < HW; index += blockDim.x) {
float val = static_cast<float>(input[index * C]);
if (IS_AVG_POOLING) {
pooling[0] += val;
}
else {
pooling[0] = pooling[0] > val ? pooling[0] : val;
}
}
if (blockDim.x <= 32) {
if (IS_AVG_POOLING) {
warpReduceSum<float, 1>(pooling);
}
else {
warpReduceMax<float, 1>(pooling);
}
}
else {
if (IS_AVG_POOLING) {
blockReduceSum<float, 1>(pooling);
}
else {
blockReduceMax<float, 1>(pooling);
}
}
__syncthreads();
if (threadIdx.x == 0) {
T output_val;
if (IS_AVG_POOLING) {
output_val = T(pooling[0] / HW);
}
else {
output_val = T(pooling[0]);
}
output[0] = output_val;
}
}
/**
* output [N, 1, 1, C]
* input [N, H, W, C]
* grid(C/2, N)
* block(block_size) -- each thread deals with H*W/block_size * 2 elements;
*/
template<typename T2, typename T, bool IS_AVG_POOLING>
__global__ void pooling_nxhTo1x1_element2_kernel(
T2* output, const T2* input, const int N, const int HW, const int C)
{
const int c_idx = blockIdx.x;
const int n_idx = blockIdx.y;
float pooling[2];
if (IS_AVG_POOLING) {
pooling[0] = pooling[1] = 0.0f;
}
else {
pooling[0] = pooling[1] = -FLT_MAX;
}
const int C_2 = C / 2;
const size_t input_offset = n_idx * HW * C_2 + c_idx;
input += input_offset;
const size_t output_offset = n_idx * C_2 + c_idx;
output += output_offset;
int tid = threadIdx.x;
for (int index = tid; index < HW; index += blockDim.x) {
T2 val = input[index * C_2];
float2 val_flt2 = {static_cast<float>(val.x), static_cast<float>(val.y)};
if (IS_AVG_POOLING) {
pooling[0] += val_flt2.x;
pooling[1] += val_flt2.y;
}
else {
pooling[0] = pooling[0] > val_flt2.x ? pooling[0] : val_flt2.x;
pooling[1] = pooling[1] > val_flt2.y ? pooling[1] : val_flt2.y;
}
}
if (blockDim.x <= 32) {
if (IS_AVG_POOLING) {
warpReduceSum<float, 2>(pooling);
}
else {
warpReduceMax<float, 2>(pooling);
}
}
else {
if (IS_AVG_POOLING) {
blockReduceSum<float, 2>(pooling);
}
else {
blockReduceMax<float, 2>(pooling);
}
}
__syncthreads();
if (threadIdx.x == 0) {
T2 output_val;
if (IS_AVG_POOLING) {
output_val.x = T(pooling[0] / HW);
output_val.y = T(pooling[1] / HW);
}
else {
output_val.x = T(pooling[0]);
output_val.y = T(pooling[1]);
}
output[0] = output_val;
}
}
template <typename T>
void pooling_nhwc(cutlass::Tensor4DCoord input_tensor_size,
cutlass::Tensor4DCoord filter_tensor_size,
cutlass::Tensor4DCoord output_tensor_size,
cutlass::Tensor4DCoord padding,
cutlass::MatrixCoord stride,
TensorRef<T, layout::TensorNHWC> ref_input,
TensorRef<T, layout::TensorNHWC> ref_output,
int poolingType, //0 for avg pooling ; 1 for max pooling
cudaStream_t stream) {
assert(input_tensor_size.n() == output_tensor_size.n() &&
input_tensor_size.c() == output_tensor_size.c());
assert(filter_tensor_size.h() == stride.row() &&
filter_tensor_size.w() == stride.column());
const int N = input_tensor_size.n();
const int H = input_tensor_size.h();
const int W = input_tensor_size.w();
const int C = input_tensor_size.c();
const int padding_H = padding.h();
const int padding_W = padding.w();
const int kernel_H = filter_tensor_size.h();
const int kernel_W = filter_tensor_size.w();
const int stride_H = stride.row();
const int stride_W = stride.column();
const int output_H = getOutputSize(H, padding_H, kernel_H, stride_H);
const int output_W = getOutputSize(W, padding_W, kernel_W, stride_W);
assert(output_tensor_size.h() == output_H &&
output_tensor_size.w() == output_W);
if (C % 2 != 0) {
if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0)) {
dim3 grid(C, N);
dim3 block(256);
if (H*W < block.x){
block.x = (H*W + 31)/32*32;
}
if (poolingType == 0) {
pooling_nxhTo1x1_element1_kernel<T, true><<<grid, block, 0, stream>>>(
ref_output.data(),
ref_input.data(),
N,
H*W,
C);
} // if (poolingType == 0)
else {
pooling_nxhTo1x1_element1_kernel<T, false><<<grid, block, 0, stream>>>(
ref_output.data(),
ref_input.data(),
N,
H*W,
C);
}
} // if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0))
else {
dim3 grid(N, output_H, output_W);
dim3 block(256);
if (C < block.x) {
block.x = C;
}
if (poolingType == 0) {
pooling_nhwc_element1_kernel<T, true><<<grid, block, 0, stream>>>(
ref_output.data(),
ref_input.data(),
N,
H,
W,
C,
output_H,
output_W,
kernel_H,
kernel_W,
stride_H,
stride_W,
padding_H,
padding_W);
} // if (poolingType == 0)
else {
pooling_nhwc_element1_kernel<T, false><<<grid, block, 0, stream>>>(
ref_output.data(),
ref_input.data(),
N,
H,
W,
C,
output_H,
output_W,
kernel_H,
kernel_W,
stride_H,
stride_W,
padding_H,
padding_W);
}
}
  } // if (C % 2 != 0)
else {
if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0)) {
dim3 grid(C/2, N);
dim3 block(256);
if (H*W < block.x){
block.x = (H*W + 31)/32*32;
}
if (poolingType == 0) {
if (std::is_same<T, float>::value) {
pooling_nxhTo1x1_element2_kernel<float2, float, true><<<grid, block, 0, stream>>>(
(float2*)(ref_output.data()),
(const float2*)(ref_input.data()),
N,
H*W,
C);
} // if (std::is_same<T, float>::value)
else {
pooling_nxhTo1x1_element2_kernel<half2, half, true><<<grid, block, 0, stream>>>(
(half2*)(ref_output.data()),
(const half2*)(ref_input.data()),
N,
H*W,
C);
}
} // if (poolingType == 0)
else {
if (std::is_same<T, float>::value) {
pooling_nxhTo1x1_element2_kernel<float2, float, false><<<grid, block, 0, stream>>>(
(float2*)(ref_output.data()),
(const float2*)(ref_input.data()),
N,
H*W,
C);
} // if (std::is_same<T, float>::value)
else {
pooling_nxhTo1x1_element2_kernel<half2, half, false><<<grid, block, 0, stream>>>(
(half2*)(ref_output.data()),
(const half2*)(ref_input.data()),
N,
H*W,
C);
}
}
} // if ((H == kernel_H && padding_H == 0) && (W == kernel_W && padding_W == 0))
else {
dim3 grid(N, output_H, output_W);
dim3 block(256);
if (C/2 < block.x) {
block.x = C/2;
}
if (poolingType == 0) {
if (std::is_same<T, float>::value) {
pooling_nhwc_element2_kernel<float2, float, true><<<grid, block, 0, stream>>>(
(float2*)(ref_output.data()),
(const float2*)(ref_input.data()),
N,
H,
W,
C/2,
output_H,
output_W,
kernel_H,
kernel_W,
stride_H,
stride_W,
padding_H,
padding_W);
} // if (std::is_same<T, float>::value)
else {
pooling_nhwc_element2_kernel<half2, half, true><<<grid, block, 0, stream>>>(
(half2*)(ref_output.data()),
(const half2*)(ref_input.data()),
N,
H,
W,
C/2,
output_H,
output_W,
kernel_H,
kernel_W,
stride_H,
stride_W,
padding_H,
padding_W);
}
} // if (poolingType == 0)
else {
if (std::is_same<T, float>::value) {
pooling_nhwc_element2_kernel<float2, float, false><<<grid, block, 0, stream>>>(
(float2*)(ref_output.data()),
(const float2*)(ref_input.data()),
N,
H,
W,
C/2,
output_H,
output_W,
kernel_H,
kernel_W,
stride_H,
stride_W,
padding_H,
padding_W);
} // if (std::is_same<T, float>::value)
else {
pooling_nhwc_element2_kernel<half2, half, false><<<grid, block, 0, stream>>>(
(half2*)(ref_output.data()),
(const half2*)(ref_input.data()),
N,
H,
W,
C/2,
output_H,
output_W,
kernel_H,
kernel_W,
stride_H,
stride_W,
padding_H,
padding_W);
}
}
}
}
}
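// Example usage (hypothetical sketch, not part of the library API): global average
// pooling of a half-precision NHWC tensor, where the filter covers the full H x W
// extent so each (n, c) slice is reduced to a single value. Tensor names and sizes
// below are assumptions for illustration only.
//
//   #include <cutlass/util/host_tensor.h>
//
//   cutlass::Tensor4DCoord input_size(8, 7, 7, 512);   // N, H, W, C
//   cutlass::Tensor4DCoord filter_size(1, 7, 7, 1);    // kernel covers all of H x W
//   cutlass::Tensor4DCoord output_size(8, 1, 1, 512);
//   cutlass::Tensor4DCoord padding(0, 0, 0, 0);
//   cutlass::MatrixCoord stride(7, 7);                 // stride must equal the filter extent
//
//   cutlass::HostTensor<cutlass::half_t, cutlass::layout::TensorNHWC> input(input_size);
//   cutlass::HostTensor<cutlass::half_t, cutlass::layout::TensorNHWC> output(output_size);
//
//   cutlass::pooling_nhwc<cutlass::half_t>(input_size, filter_size, output_size, padding,
//                                          stride, input.device_ref(), output.device_ref(),
//                                          /*poolingType=*/0, /*stream=*/nullptr);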
} //namespace cutlass
| 18,653 | C | 31.329289 | 107 | 0.498097 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/device_dump.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <stdio.h>
#include "cutlass/cutlass.h"
/**
* \file
* \brief C++ interface to dump fragments and shared memory contents for
* debugging.
*/
namespace cutlass {
namespace debug {
/******************************************************************************
* Dump the fragments
******************************************************************************/
/// The first N threads dump the first M elements from their fragments with a
/// stride of S elements. If N is not specified, dump the data of all the
/// threads. If M is not specified, dump all the elements of the fragment.
template <typename Fragment>
CUTLASS_DEVICE void dump_fragment(Fragment const& frag, int N = 0, int M = 0,
int S = 1) {
int total_threads = blockDim.x * blockDim.y * blockDim.z;
int block_id =
blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int thread_id = (threadIdx.z * (blockDim.x * blockDim.y)) +
(threadIdx.y * blockDim.x) + threadIdx.x;
if (N < 0 || N > total_threads) {
if (thread_id == 0 && block_id == 0)
printf("Thread number N = %d should between [1, %d].\n", N,
total_threads);
__syncthreads();
return;
}
int total_elements = frag.size();
if (M < 0 || M > total_elements) {
if (thread_id == 0 && block_id == 0)
printf("Element number M = %d should between [1, %d].\n", M,
total_elements);
__syncthreads();
return;
}
if (N == 0) N = total_threads;
if (M == 0) M = total_elements;
if (S < 1 || S > M) {
if (thread_id == 0 && block_id == 0)
printf("Stride S = %d should between [1, %d].\n", S, M);
__syncthreads();
return;
}
if (thread_id == 0 && block_id == 0)
printf("\n*******************Dumping the fragments*******************\n\n");
CUTLASS_PRAGMA_NO_UNROLL
for (int tid = 0; tid < N; ++tid) {
if (tid == thread_id) {
printf("TB%d W%d T%d: ", block_id, tid / 32, tid & 31);
CUTLASS_PRAGMA_NO_UNROLL
for (int i = 0; i < M; i += S) {
printf("%.0f ", float(typename Fragment::value_type(frag[i])));
}
printf("\n");
}
__syncthreads();
}
if (thread_id == 0 && block_id == 0)
printf("\n***********************************************************\n\n");
__syncthreads();
return;
}
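// Example usage (hypothetical sketch): from inside a device function that owns a
// CUTLASS fragment, dump the first 8 elements of each of the first 32 threads'
// accumulators. The fragment name below is an assumption for illustration only.
//
//   // inside a __global__ kernel or CUTLASS device-side code:
//   cutlass::debug::dump_fragment(accum_fragment, /*N=*/32, /*M=*/8, /*S=*/1);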
/******************************************************************************
* Dump the shared memory
******************************************************************************/
#define SHMEM_ROW_SIZE 128
/// Dump the shared memory contents. ptr is the begin address, size specifies
/// the number of elements that need to be dumped, and S specifies the stride.
template <typename Element>
CUTLASS_DEVICE void dump_shmem(Element const* ptr, size_t size, int S = 1) {
int block_id =
blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int thread_id = (threadIdx.z * (blockDim.x * blockDim.y)) +
(threadIdx.y * blockDim.x) + threadIdx.x;
if (ptr == nullptr) {
if (thread_id == 0 && block_id == 0) printf("ptr is null.\n");
__syncthreads();
return;
}
if (size < 1) {
if (thread_id == 0 && block_id == 0)
printf("Element size is less than 1\n");
__syncthreads();
return;
}
int row_elements = SHMEM_ROW_SIZE / sizeof(Element);
if (S < 1 || S > row_elements) {
if (thread_id == 0 && block_id == 0)
printf("Stride S = %d should between [1, %d].\n", S, row_elements);
__syncthreads();
return;
}
__syncthreads();
if (thread_id == 0)
printf("\n********Dumping the shared memory of TB %d*******\n\n", block_id);
if (thread_id == 0) {
for (int i = 0; i < size; i += row_elements) {
for (int j = 0; j < row_elements; j += S) {
printf("%.0f ", float(ptr[i + j]));
}
printf("\n");
}
}
if (thread_id == 0)
printf("\n***********************************************************\n\n");
__syncthreads();
return;
}
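// Example usage (hypothetical sketch): dump a tile of operand data that was staged in
// shared memory, printing every fourth element. The buffer name and tile size below are
// assumptions for illustration only.
//
//   // inside a __global__ kernel, after the tile has been written to shared memory:
//   __shared__ cutlass::half_t smem_tile[128 * 8];
//   // ... fill smem_tile ...
//   cutlass::debug::dump_shmem(smem_tile, /*size=*/128 * 8, /*S=*/4);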
} // namespace debug
} // namespace cutlass
| 5,953 | C | 30.670213 | 100 | 0.555014 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/device_groupnorm.h | /******************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
/**
* \file
* \brief cuda kernels to do group norm on a device memory tensor with NHWC layout. The tensor will be divided into [N, H, W, G, C'] and then we do normalization on [H, W, C'].
*/
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/tensor_ref.h"
#include "device_utils.h"
#include <float.h>
namespace cutlass {
/** \brief interface to do group norm on a device memory tensor with NHWC layout.
* \tparam T: data type
*/
template <typename T>
void groupnorm(cutlass::Tensor4DCoord input_size,
const int num_groups,
const float eps,
TensorRef<T, layout::TensorNHWC> ref_output,
TensorRef<T, layout::TensorNHWC> ref_input,
TensorRef<T, layout::TensorNHWC> ref_gamma,
TensorRef<T, layout::TensorNHWC> ref_beta,
cudaStream_t stream);
extern __shared__ char groupnorm_shm[];
// For small prod_dim1_to_last_dim/num_groups, to avoid multiple loads from global memory,
// we store the input in the shared memory.
// grid(num_groups, dim0)
// block(BLOCKSIZE)
// BLOCKSIZE * TVecs_PER_THREAD <= prod_dim1_to_last_dim/num_groups
template<typename TVec, typename T, int T_PER_TVec>
__global__ void groupnorm_twopass_store_locally(T* output,
const T* input,
const T* gamma,
const T* beta,
int num_groups,
int prod_dim1_to_last_dim,
int last_dim,
const float eps,
const int TVecs_PER_THREAD)
{
const int bid = blockIdx.y; // index of batch
const int gid = blockIdx.x; // index of group
const int tid = threadIdx.x; // index of thread
const int bdimx = blockDim.x;
const int s_reduce_elements = prod_dim1_to_last_dim / num_groups;
const int v_reduce_elements = s_reduce_elements / T_PER_TVec;
const int s_group_stride = last_dim / num_groups;
const int v_group_stride = s_group_stride / T_PER_TVec;
const int offset_of_group = (bid * prod_dim1_to_last_dim + gid * s_group_stride) / T_PER_TVec;
const TVec* input_TVec_ptr = (const TVec*)(input) + offset_of_group;
TVec* output_TVec_ptr = (TVec*)(output) + offset_of_group;
T* local_val = ((T*)groupnorm_shm) + TVecs_PER_THREAD * T_PER_TVec * tid;
float local_sum[1] = {0.0f};
// load from global memory into shared memory
#pragma unroll
for (int i = 0; i < TVecs_PER_THREAD; i += 1) {
const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec;
const int offset_in_group =
((current_load_start_idx / s_group_stride) * last_dim + (current_load_start_idx % s_group_stride))
/ T_PER_TVec;
if (current_load_start_idx < s_reduce_elements) {
TVec tmp_vec = input_TVec_ptr[offset_in_group];
T* tmp_vec_ptr = (T*)(&tmp_vec);
const int local_val_offset = i * T_PER_TVec;
#pragma unroll
for (int j = 0; j < T_PER_TVec; j++) {
float tmp = static_cast<float>(tmp_vec_ptr[j]);
local_sum[0] += tmp;
local_val[local_val_offset + j] = tmp_vec_ptr[j];
}
}
}
__shared__ float s_mean, s_variance;
// reduction for mean
if (bdimx <= 32) {
warpReduceSum<float, 1>(local_sum);
}
else {
blockReduceSum<float, 1>(local_sum);
}
if (tid == 0) {
s_mean = local_sum[0] / s_reduce_elements;
}
__syncthreads();
// reduction for std
local_sum[0] = 0.0f;
#pragma unroll
for (int i = 0; i < TVecs_PER_THREAD; i += 1) {
const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec;
if (current_load_start_idx < s_reduce_elements) {
const int local_val_offset = i * T_PER_TVec;
#pragma unroll
for (int j = 0; j < T_PER_TVec; j++) {
float tmp = static_cast<float>(local_val[local_val_offset + j]);
tmp -= s_mean;
local_sum[0] += tmp * tmp;
}
}
}
if (bdimx <= 32) {
warpReduceSum<float, 1>(local_sum);
}
else {
blockReduceSum<float, 1>(local_sum);
}
if (tid == 0) {
s_variance = rsqrtf(local_sum[0] / s_reduce_elements + eps);
}
__syncthreads();
// normalize
const int gamma_offset_of_group = gid * v_group_stride;
const TVec* gamma_TVec_ptr = (const TVec*)gamma + gamma_offset_of_group;
const TVec* beta_TVec_ptr = (const TVec*)beta + gamma_offset_of_group;
#pragma unroll
for (int i = 0; i < TVecs_PER_THREAD; i += 1) {
const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec;
const int offset_in_group =
((current_load_start_idx / s_group_stride) * last_dim + (current_load_start_idx % s_group_stride))
/ T_PER_TVec;
const int gamma_offset_in_group = (current_load_start_idx % s_group_stride) / T_PER_TVec;
const int local_val_offset = i * T_PER_TVec;
if (current_load_start_idx < s_reduce_elements) {
TVec gamma_val = gamma_TVec_ptr[gamma_offset_in_group];
TVec beta_val = beta_TVec_ptr[gamma_offset_in_group];
T* gamma_val_ptr = (T*)(&gamma_val);
T* beta_val_ptr = (T*)(&beta_val);
TVec tmp_vec;
T* tmp_vec_ptr = (T*)(&tmp_vec);
#pragma unroll
for (int j = 0; j < T_PER_TVec; j++) {
float tmp = (static_cast<float>(local_val[local_val_offset + j]) - s_mean) * s_variance
* static_cast<float>(gamma_val_ptr[j])
+ static_cast<float>(beta_val_ptr[j]);
if (sizeof(T) == sizeof(half)) {
tmp_vec_ptr[j] = T(__float2half_rn(tmp));
}
else {
tmp_vec_ptr[j] = T(tmp);
}
}
output_TVec_ptr[offset_in_group] = tmp_vec;
}
}
}
// For large prod_dim1_to_last_dim/num_groups,
// where the data cannot be stored locally in shared memory,
// we will load from global memory multiple times.
// grid(num_groups, dim0)
// block(BLOCKSIZE)
// BLOCKSIZE * TVecs_PER_THREAD <= prod_dim1_to_last_dim/num_groups
template<typename TVec, typename T, int T_PER_TVec>
__global__ void groupnorm_twopass_multiple_load(T* output,
const T* input,
const T* gamma,
const T* beta,
int num_groups,
int prod_dim1_to_last_dim,
int last_dim,
const float eps,
const int TVecs_PER_THREAD)
{
const int bid = blockIdx.y; // index of batch
const int gid = blockIdx.x; // index of group
const int tid = threadIdx.x; // index of thread
const int bdimx = blockDim.x;
const int s_reduce_elements = prod_dim1_to_last_dim / num_groups;
const int v_reduce_elements = s_reduce_elements / T_PER_TVec;
const int s_group_stride = last_dim / num_groups;
const int v_group_stride = s_group_stride / T_PER_TVec;
const int offset_of_group = (bid * prod_dim1_to_last_dim + gid * s_group_stride) / T_PER_TVec;
const TVec* input_TVec_ptr = (const TVec*)(input) + offset_of_group;
TVec* output_TVec_ptr = (TVec*)(output) + offset_of_group;
float local_sum[1] = {0.0f};
#pragma unroll
for (int i = 0; i < TVecs_PER_THREAD; i += 1) {
const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec;
if (current_load_start_idx < s_reduce_elements) {
const int offset_in_group =
((current_load_start_idx / s_group_stride) * last_dim + (current_load_start_idx % s_group_stride))
/ T_PER_TVec;
TVec tmp_vec = input_TVec_ptr[offset_in_group];
T* tmp_vec_ptr = (T*)(&tmp_vec);
#pragma unroll
for (int j = 0; j < T_PER_TVec; j++) {
float tmp = static_cast<float>(tmp_vec_ptr[j]);
local_sum[0] += tmp;
}
}
}
__shared__ float s_mean, s_variance;
// reduction for mean
if (bdimx <= 32) {
warpReduceSum<float, 1>(local_sum);
}
else {
blockReduceSum<float, 1>(local_sum);
}
if (tid == 0) {
s_mean = local_sum[0] / s_reduce_elements;
}
__syncthreads();
// reduction for std
local_sum[0] = 0.0f;
#pragma unroll
for (int i = 0; i < TVecs_PER_THREAD; i += 1) {
const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec;
if (current_load_start_idx < s_reduce_elements) {
const int offset_in_group =
((current_load_start_idx / s_group_stride) * last_dim + (current_load_start_idx % s_group_stride))
/ T_PER_TVec;
TVec tmp_vec = input_TVec_ptr[offset_in_group];
T* tmp_vec_ptr = (T*)(&tmp_vec);
#pragma unroll
for (int j = 0; j < T_PER_TVec; j++) {
float tmp = static_cast<float>(tmp_vec_ptr[j]);
tmp -= s_mean;
local_sum[0] += tmp * tmp;
}
}
}
if (bdimx <= 32) {
warpReduceSum<float, 1>(local_sum);
}
else {
blockReduceSum<float, 1>(local_sum);
}
if (tid == 0) {
s_variance = rsqrtf(local_sum[0] / s_reduce_elements + eps);
}
__syncthreads();
// normalize
const int gamma_offset_of_group = gid * v_group_stride;
const TVec* gamma_TVec_ptr = (const TVec*)gamma + gamma_offset_of_group;
const TVec* beta_TVec_ptr = (const TVec*)beta + gamma_offset_of_group;
#pragma unroll
for (int i = 0; i < TVecs_PER_THREAD; i += 1) {
const int current_load_start_idx = (i * bdimx + tid) * T_PER_TVec;
if (current_load_start_idx < s_reduce_elements) {
const int offset_in_group =
((current_load_start_idx / s_group_stride) * last_dim + (current_load_start_idx % s_group_stride))
/ T_PER_TVec;
const int gamma_offset_in_group = (current_load_start_idx % s_group_stride) / T_PER_TVec;
TVec gamma_val = gamma_TVec_ptr[gamma_offset_in_group];
TVec beta_val = beta_TVec_ptr[gamma_offset_in_group];
T* gamma_val_ptr = (T*)(&gamma_val);
T* beta_val_ptr = (T*)(&beta_val);
TVec tmp_vec = input_TVec_ptr[offset_in_group];
T* tmp_vec_ptr = (T*)(&tmp_vec);
TVec output_tmp_vec;
T* output_tmp_vec_ptr = (T*)(&output_tmp_vec);
#pragma unroll
for (int j = 0; j < T_PER_TVec; j++) {
float tmp =
(static_cast<float>(tmp_vec_ptr[j]) - s_mean) * s_variance * static_cast<float>(gamma_val_ptr[j])
+ static_cast<float>(beta_val_ptr[j]);
if (sizeof(T) == sizeof(half)) {
output_tmp_vec_ptr[j] = T(__float2half_rn(tmp));
}
else {
output_tmp_vec_ptr[j] = T(tmp);
}
}
output_TVec_ptr[offset_in_group] = output_tmp_vec;
}
}
}
//ref_input & ref_output should be [N, H, W, C]
//ref_gamma & ref_beta should be [1, 1, 1, C]
template <typename T>
void groupnorm(cutlass::Tensor4DCoord input_size,
const int num_groups,
const float eps,
TensorRef<T, layout::TensorNHWC> ref_output,
TensorRef<T, layout::TensorNHWC> ref_input,
TensorRef<T, layout::TensorNHWC> ref_gamma,
TensorRef<T, layout::TensorNHWC> ref_beta,
cudaStream_t stream){
const int N = input_size.n();
const int H = input_size.h();
const int W = input_size.w();
const int C = input_size.c();
if (C % num_groups != 0){
printf("[ERROR] C should be a multiple of num_groups.\n");
}
T* output = ref_output.data();
const T* input = ref_input.data();
const T* gamma = ref_gamma.data();
const T* beta = ref_beta.data();
const int dim0 = N;
const int last_dim = C;
const int prod_dim1_to_last_dim = H*W*C;
const int s_reduce_elements = prod_dim1_to_last_dim / num_groups;
const int s_group_stride = last_dim / num_groups;
dim3 grid(num_groups, dim0);
int threadblock_size = 32;
if (s_group_stride % 2 == 0) {
const int T_PER_TVec = 2;
while (threadblock_size < 1024) {
if (s_reduce_elements / T_PER_TVec / threadblock_size <= 8)
break;
threadblock_size *= 2;
}
dim3 block(threadblock_size);
const int TVec_PER_THREAD = (s_reduce_elements / T_PER_TVec + threadblock_size - 1) / threadblock_size;
const int shm_size = T_PER_TVec * TVec_PER_THREAD * threadblock_size * sizeof(T);
    // for small s_reduce_elements, e.g. the specific case of H=W=22, C=1280, num_groups=32;
    // better grid & block sizes may exist for other cases.
    // ensure the shared memory usage is smaller than 48KB
if (std::is_same<T, float>::value){
if (shm_size < 48 * 1024) {
groupnorm_twopass_store_locally<float2, T, T_PER_TVec><<<grid, block, shm_size, stream>>>(
output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD);
}
else {
groupnorm_twopass_multiple_load<float2, T, T_PER_TVec><<<grid, block, 0, stream>>>(
output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD);
}
}
else{
if (shm_size < 48 * 1024) {
groupnorm_twopass_store_locally<half2, T, T_PER_TVec><<<grid, block, shm_size, stream>>>(
output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD);
}
else {
groupnorm_twopass_multiple_load<half2, T, T_PER_TVec><<<grid, block, 0, stream>>>(
output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD);
}
}
}
else {
const int T_PER_TVec = 1;
while (threadblock_size < 1024) {
if (s_reduce_elements / T_PER_TVec / threadblock_size <= 8)
break;
threadblock_size *= 2;
}
dim3 block(threadblock_size);
const int TVec_PER_THREAD = (s_reduce_elements / T_PER_TVec + threadblock_size - 1) / threadblock_size;
const int shm_size = T_PER_TVec * TVec_PER_THREAD * threadblock_size * sizeof(T);
if (shm_size < 48 * 1024) {
groupnorm_twopass_store_locally<T, T, T_PER_TVec><<<grid, block, shm_size, stream>>>(
output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD);
}
else {
groupnorm_twopass_multiple_load<T, T, T_PER_TVec><<<grid, block, 0, stream>>>(
output, input, gamma, beta, num_groups, prod_dim1_to_last_dim, last_dim, eps, TVec_PER_THREAD);
}
}
}
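// Example usage (hypothetical sketch, not part of the library API): group norm over a
// half-precision NHWC activation tensor with 32 groups. Tensor names and sizes are
// assumptions for illustration only; gamma and beta are per-channel [1, 1, 1, C].
//
//   #include <cutlass/util/host_tensor.h>
//
//   cutlass::Tensor4DCoord size(8, 22, 22, 1280);   // N, H, W, C
//   cutlass::HostTensor<cutlass::half_t, cutlass::layout::TensorNHWC> in(size), out(size);
//   cutlass::HostTensor<cutlass::half_t, cutlass::layout::TensorNHWC> gamma({1, 1, 1, 1280});
//   cutlass::HostTensor<cutlass::half_t, cutlass::layout::TensorNHWC> beta({1, 1, 1, 1280});
//
//   cutlass::groupnorm<cutlass::half_t>(size, /*num_groups=*/32, /*eps=*/1e-5f,
//                                       out.device_ref(), in.device_ref(),
//                                       gamma.device_ref(), beta.device_ref(),
//                                       /*stream=*/nullptr);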
} //namespace cutlass
| 17,695 | C | 42.91067 | 176 | 0.545465 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/debug.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Contains code for debugging cutlass code
*/
#pragma once
#include "device_dump.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
/******************************************************************************
* Debug and logging macros
******************************************************************************/
/**
* Formats and prints the given message to stdout
*/
#if !defined(CUDA_LOG)
#if !defined(__CUDA_ARCH__)
#define CUDA_LOG(format, ...) printf(format, __VA_ARGS__)
#else
#define CUDA_LOG(format, ...) \
printf("[block (%d,%d,%d), thread (%d,%d,%d)]: " format, \
blockIdx.x, \
blockIdx.y, \
blockIdx.z, \
threadIdx.x, \
threadIdx.y, \
threadIdx.z, \
__VA_ARGS__);
#endif
#endif
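/**
 * Example usage (hypothetical sketch): the same call site works in host and device code;
 * on the device a "[block (...), thread (...)]" prefix is prepended automatically. The
 * variable names are assumptions for illustration only.
 *
 *   CUDA_LOG("tile %d finished with status %d\n", tile_idx, int(status));
 */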
/**
* Formats and prints the given message to stdout only if DEBUG is defined
*/
#if !defined(CUDA_LOG_DEBUG)
#ifdef DEBUG
#define CUDA_LOG_DEBUG(format, ...) CUDA_LOG(format, __VA_ARGS__)
#else
#define CUDA_LOG_DEBUG(format, ...)
#endif
#endif
/**
* \brief The corresponding error message is printed to \p stderr (or \p stdout in device code)
* along with the supplied source context.
*
* \return The CUDA error.
*/
__host__ CUTLASS_DEVICE cudaError_t cuda_perror_impl(cudaError_t error,
const char* expression,
const char* filename,
int line) {
(void)filename;
(void)line;
if (error) {
#if !defined(__CUDA_ARCH__)
fprintf(
stderr, "CUDA error %d [%s, %d] in expression '%s': %s\n", error, filename, line, expression, cudaGetErrorString(error));
fflush(stderr);
#else
printf("CUDA error %d [%s, %d] in expression '%s'\n", error, filename, line, expression);
#endif
}
return error;
}
/**
* \brief Perror macro
*/
#ifndef CUDA_PERROR
#define CUDA_PERROR(e) cuda_perror_impl((cudaError_t)(e), #e, __FILE__, __LINE__)
#endif
/**
* \brief Perror macro with exit
*/
#ifndef CUDA_PERROR_EXIT
#define CUDA_PERROR_EXIT(e) \
do { if (cuda_perror_impl((cudaError_t)(e), #e, __FILE__, __LINE__)) { \
exit(1); \
} } while (0)
#endif
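/**
 * Example usage (hypothetical sketch): wrap CUDA runtime calls so that failures are
 * reported with their source location; CUDA_PERROR_EXIT additionally terminates the
 * process. The buffer size and stream below are assumptions for illustration only.
 *
 *   void *workspace = nullptr;
 *   CUDA_PERROR_EXIT(cudaMalloc(&workspace, workspace_bytes));
 *   CUDA_PERROR(cudaMemsetAsync(workspace, 0, workspace_bytes, stream));
 */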
/**
* \brief Perror macro only if DEBUG is defined
*/
#ifndef CUDA_PERROR_DEBUG
#ifdef DEBUG
#define CUDA_PERROR_DEBUG(e) CUDA_PERROR(e)
#else
#define CUDA_PERROR_DEBUG(e) (e)
#endif
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// A small helper class to dump a type at compile time
// Usage: DebugType<Class>::Class
template <typename T>
struct DebugType {};
template <typename T>
void DebugTypeFunc(T const& t) {
T::t;
}
// A small helper class to dump a compile time constant at compile time
// Usage: DebugValue<Class::kConstant>::kConstant
template <int Value>
struct DebugValue {};
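// Example usage (hypothetical sketch): referencing a member that does not exist forces a
// compile-time error whose message spells out the instantiated type or constant. The
// `Gemm` type below is an assumption for illustration only.
//
//   using Probe = DebugType<Gemm::LayoutA>::type;          // error message names LayoutA
//   int probe   = DebugValue<Gemm::kAlignmentA>::kConstant; // error message shows the value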
| 5,104 | C | 34.451389 | 129 | 0.551724 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/device_nhwc_to_nchw.h | /******************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
/**
* \file
* \brief cuda kernels to transform a device memory tensor from NHWC layout to NCHW layout.
*/
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/tensor_ref.h"
namespace cutlass {
/** \brief interface to transform a device memory tensor from NHWC layout to NCHW layout.
* \tparam T: data type
*/
template <typename T>
void nhwc_to_nchw(cutlass::Tensor4DCoord input_tensor_size,
cutlass::Tensor4DCoord output_tensor_size,
TensorRef<T, layout::TensorNHWC> ref_input,
TensorRef<T, layout::TensorNCHW> ref_output,
cudaStream_t stream);
template <typename T>
__global__ void nhwc_to_nchw_kernel(T *output,
const T *input,
const int n,
const int h,
const int w,
const int c) {
const int hw = h*w;
const int hwc = hw*c;
__shared__ T shbuf[32 * (32 + 1)];
const int32_t tid = threadIdx.y*blockDim.x + threadIdx.x;
const int32_t wid = tid / 32;
const int32_t lid = tid % 32;
const int32_t ni = blockIdx.z;
const int32_t hwi0 = blockIdx.y * 32;
const int32_t ci0 = blockIdx.x * 32;
const size_t input_idx = ni * hwc + (hwi0 + wid) * c + ci0;
const T *A = input + input_idx;
if (ci0 + lid < c) {
const int lid_x_33 = lid * 33;
if ((hwi0 + 32) <= hw) {
int hwi = wid; // between 0 and 7
CUTLASS_PRAGMA_UNROLL
for (int cLoopIdx = 0; cLoopIdx < 4; cLoopIdx++) {
shbuf[lid_x_33 + hwi] = A[lid];
A = &A[8 * c];
hwi += 8;
}
} else {
for (int hwi = wid; hwi < 32; hwi += 8) {
if ((hwi + hwi0) < hw) {
shbuf[lid_x_33 + hwi] = A[lid];
}
A = &A[8 * c];
}
}
}
__syncthreads();
const int32_t hwiOut = hwi0 + lid;
output = &output[ni * hwc + hwiOut];
if (hwiOut < hw) {
if (ci0 + 32 < c) {
int cI = wid;
CUTLASS_PRAGMA_UNROLL
for (int hwLoopIdx = 0; hwLoopIdx < 4; ++hwLoopIdx) {
output[(ci0 + cI) * hw] = shbuf[(cI)*33 + lid];
cI += 8;
}
} else {
for (int cI = wid; cI < 32; cI += 8) {
if (ci0 + cI < c) {
output[(ci0 + cI) * hw] = shbuf[(cI)*33 + lid];
}
}
}
}
}
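// For reference, a restatement of the element mapping performed by the kernel above:
//   output[((n * C + c) * H + h) * W + w] = input[((n * H + h) * W + w) * C + c],
// i.e. output is the NCHW view of the NHWC input; the 32x32 shared-memory tile (with a
// +1 padding column) is only there to keep both the loads and the stores coalesced.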
template <typename T>
void nhwc_to_nchw(cutlass::Tensor4DCoord input_tensor_size,
cutlass::Tensor4DCoord output_tensor_size,
TensorRef<T, layout::TensorNHWC> ref_input,
TensorRef<T, layout::TensorNCHW> ref_output,
cudaStream_t stream) {
assert(
input_tensor_size.n() == output_tensor_size.n() &&
input_tensor_size.h() == output_tensor_size.c() &&
input_tensor_size.w() == output_tensor_size.h() &&
input_tensor_size.c() == output_tensor_size.w());
int n = input_tensor_size.n();
int h = input_tensor_size.h();
int w = input_tensor_size.w();
int c = input_tensor_size.c();
dim3 grid((c + 31)/32, (h*w + 31)/32, n);
dim3 block(32, 8);
nhwc_to_nchw_kernel<<<grid, block, 0, stream>>>(ref_output.data(), ref_input.data(),
n, h, w, c);
}
} //namespace cutlass
| 5,214 | C | 34.965517 | 91 | 0.583429 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/device_nhwc_padding.h | /******************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
/**
* \file
* \brief cuda kernels for padding in device memory with NHWC layout.
*/
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/tensor_ref.h"
namespace cutlass {
/** \brief interface for padding in a device memory tensor with NHWC layout
* \tparam T: data type
*/
template <typename T>
void nhwc_padding(cutlass::Tensor4DCoord input_tensor_size,
cutlass::Tensor4DCoord output_tensor_size,
TensorRef<T, layout::TensorNHWC> ref_input,
TensorRef<T, layout::TensorNHWC> ref_output,
cudaStream_t stream);
template <typename T>
__global__ void nhwc_padding_kernel(const int32_t n,
const int32_t h,
const int32_t w,
const int32_t c_in,
const int32_t c_out,
const T zero,
const T *input,
T *output){
const int32_t idx_jump = blockDim.x * gridDim.x;
const int32_t total_elements = n * h * w * c_out;
  int32_t c_idx, w_idx, h_idx, n_idx, residual;
T value;
for (int32_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < total_elements; idx += idx_jump) {
c_idx = idx%c_out;
if (c_idx >= c_in){
value = zero;
}
else{
      residual = idx/c_out;
      w_idx = residual%w;
      residual = residual/w;
      h_idx = residual%h;
      n_idx = residual/h;
      residual = ((n_idx * h + h_idx) * w + w_idx) * c_in + c_idx;
      value = input[residual];
}
output[idx] = value;
}
}
// fast kernel for c_in = 3 & c_out = 4
template <typename Tio, typename Telement, int element_in_Tio>
__global__ void nhwc_padding_channel_3To4_kernel(const int32_t n,
const int32_t h,
const int32_t w,
const Tio *input,
Tio *output,
const int32_t max_output_element,
const int32_t max_input_element,
const Tio zero_io,
const Telement zero_element){
__shared__ Tio shm[192];
const int tidx = blockIdx.x * 192 + threadIdx.x;
const int threadidx = threadIdx.x;
shm[threadIdx.x] = tidx >= max_input_element ? zero_io : input[tidx];
__syncthreads();
  const int output_offset = blockIdx.x * 256;
  const int lower_bound = max_output_element < output_offset + 256 ? max_output_element : output_offset + 256;
  for (int i = output_offset + threadidx, j = threadidx ; i < lower_bound ; i+=192, j+=192)
{
const Telement* shm_element = (const Telement*)shm + j*3*element_in_Tio/4;
Telement array[element_in_Tio];
CUTLASS_PRAGMA_UNROLL
for (int k = 0 ; k < element_in_Tio ; k++)
array[k] = ((k+1)%4 == 0) ? zero_element : shm_element[(k > 3) ? (k - 1) : k];
output[i] = *((const Tio *)array);
}
}
// fast kernel for c_in = 3 & c_out = 8
template <typename Tio, typename Telement, int element_in_Tio>
__global__ void nhwc_padding_channel_3To8_kernel(const int32_t n,
const int32_t h,
const int32_t w,
const Tio *input,
Tio *output,
const int32_t max_output_element,
const int32_t max_input_element,
const Tio zero_io,
const Telement zero_element){
__shared__ Tio shm[192];
const int tidx = blockIdx.x * 192 + threadIdx.x;
const int threadidx = threadIdx.x;
shm[threadIdx.x] = tidx >= max_input_element ? zero_io : input[tidx];
__syncthreads();
  const int output_offset = blockIdx.x * 512;
  const int lower_bound = max_output_element < output_offset + 512 ? max_output_element : output_offset + 512;
  for (int i = output_offset + threadidx, j = threadidx ; i < lower_bound ; i+=192, j+=192)
{
const Telement* shm_element = (const Telement*)shm + (element_in_Tio == 4 ? j/2 : j)*3;
Telement array[element_in_Tio];
//float
if (element_in_Tio == 4){
CUTLASS_PRAGMA_UNROLL
for (int k = 0 ; k < element_in_Tio ; k++)
array[k] = ((j % 2) == 1) ? zero_element : ((k >= 3) ? zero_element : shm_element[k]);
}
//half
else{
CUTLASS_PRAGMA_UNROLL
for (int k = 0 ; k < element_in_Tio ; k++)
array[k] = (k >= 3) ? zero_element : shm_element[k];
}
output[i] = *((const Tio *)array);
}
}
template <typename T>
void nhwc_padding(cutlass::Tensor4DCoord input_tensor_size,
cutlass::Tensor4DCoord output_tensor_size,
TensorRef<T, layout::TensorNHWC> ref_input,
TensorRef<T, layout::TensorNHWC> ref_output,
cudaStream_t stream){
assert(
input_tensor_size.n() == output_tensor_size.n() &&
input_tensor_size.h() == output_tensor_size.h() &&
input_tensor_size.w() == output_tensor_size.w() &&
input_tensor_size.c() <= output_tensor_size.c());
int n = input_tensor_size.n();
int h = input_tensor_size.h();
int w = input_tensor_size.w();
int c_in = input_tensor_size.c();
int c_out = output_tensor_size.c();
//case 1 : channel == 3 padding to 4 or 8
if ((c_out == 4 || c_out == 8) && c_in == 3 && (n*h*w % 8 == 0)){
dim3 block(192);
const int nhw = n*h*w;
const int nhwc = nhw*c_in;
//for half_t
if (cutlass::sizeof_bits<T>::value == 16){
const int element_in_Tio = 8;
const int max_input_element = nhwc/element_in_Tio;
const int max_output_element = nhw*c_out/element_in_Tio;
const int4 zero_io = {0, 0, 0, 0};
const half_t zero_element = static_cast<half_t>(0.0f);
dim3 grid((nhwc + 192*element_in_Tio - 1)/(192*element_in_Tio));
if (c_out == 4){
nhwc_padding_channel_3To4_kernel<int4, half_t, element_in_Tio><<<grid, block, 0, stream>>>
(n, h, w,
(const int4 *)ref_input.data(),
(int4 *)ref_output.data(),
max_output_element,
max_input_element,
zero_io,
zero_element);
}
else if (c_out == 8){
nhwc_padding_channel_3To8_kernel<int4, half_t, element_in_Tio><<<grid, block, 0, stream>>>
(n, h, w,
(const int4 *)ref_input.data(),
(int4 *)ref_output.data(),
max_output_element,
max_input_element,
zero_io,
zero_element);
}
}
//for float
else{
const int element_in_Tio = 4;
const int max_input_element = nhwc/element_in_Tio;
const int max_output_element = nhw*c_out/element_in_Tio;
const float4 zero_io = {0.0f, 0.0f, 0.0f, 0.0f};
const float zero_element = 0.0f;
dim3 grid((nhwc + 192*element_in_Tio - 1)/(192*element_in_Tio));
if (c_out == 4){
nhwc_padding_channel_3To4_kernel<float4, float, element_in_Tio><<<grid, block, 0, stream>>>
(n, h, w,
(const float4 *)ref_input.data(),
(float4 *)ref_output.data(),
max_output_element,
max_input_element,
zero_io,
zero_element);
}
else if (c_out == 8){
nhwc_padding_channel_3To8_kernel<float4, float, element_in_Tio><<<grid, block, 0, stream>>>
(n, h, w,
(const float4 *)ref_input.data(),
(float4 *)ref_output.data(),
max_output_element,
max_input_element,
zero_io,
zero_element);
}
}
}
//case 2 : even channel
else if ((c_out % 2) == 0 && (c_in % 2) == 0){
int32_t total_elements = n * h * w * c_out / 2;
int block_size = 256;
dim3 grid((total_elements + 255)/256);
dim3 block(block_size);
//for half_t
if (cutlass::sizeof_bits<T>::value == 16){
const __half2 zero = {0.0f, 0.0f};
nhwc_padding_kernel<<<grid, block, 0, stream>>>(n, h, w, c_in/2, c_out/2, zero, (const __half2*)ref_input.data(), (__half2*)ref_output.data());
}
//for float
else{
const float2 zero = {0.0f, 0.0f};
nhwc_padding_kernel<<<grid, block, 0, stream>>>(n, h, w, c_in/2, c_out/2, zero, (const float2*)ref_input.data(), (float2*)ref_output.data());
}
}
//case 3 : odd channel
else{
int32_t total_elements = n * h * w * c_out;
int block_size = 256;
dim3 grid((total_elements + 255)/256);
dim3 block(block_size);
const T zero = static_cast<T>(0.0f);
nhwc_padding_kernel<<<grid, block, 0, stream>>>(n, h, w, c_in, c_out, zero, ref_input.data(), ref_output.data());
}
}
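// Example usage (hypothetical sketch, not part of the library API): pad a 3-channel
// half-precision image batch to 8 channels so that subsequent convolutions can use
// aligned vectorized accesses. Names and sizes are assumptions for illustration only.
//
//   #include <cutlass/util/host_tensor.h>
//
//   cutlass::Tensor4DCoord in_size(8, 224, 224, 3);
//   cutlass::Tensor4DCoord out_size(8, 224, 224, 8);
//   cutlass::HostTensor<cutlass::half_t, cutlass::layout::TensorNHWC> in(in_size), out(out_size);
//
//   cutlass::nhwc_padding<cutlass::half_t>(in_size, out_size, in.device_ref(), out.device_ref(),
//                                          /*stream=*/nullptr);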
} //namespace cutlass
| 11,067 | C | 38.956679 | 149 | 0.544502 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/device_utils.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief utils code for device cutlass code
*/
#pragma once
#include <cuda_fp16.h>
#include <float.h>
#define FINAL_MASK 0xffffffff
struct half4 {
half x, y, z, w;
};
template<typename T, int NUM>
__inline__ __device__ T warpReduceSum(T* val)
{
#pragma unroll
for (int i = 0; i < NUM; i++) {
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1)
val[i] += __shfl_xor_sync(FINAL_MASK, val[i], mask, 32);
}
return (T)(0.0f);
}
template<typename T, int NUM>
__inline__ __device__ T blockReduceSum(T* val)
{
__shared__ T shared[NUM][33];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
warpReduceSum<T, NUM>(val);
if (lane == 0) {
#pragma unroll
for (int i = 0; i < NUM; i++) {
shared[i][wid] = val[i];
}
}
__syncthreads();
bool is_mask = threadIdx.x < (blockDim.x / 32.f);
#pragma unroll
for (int i = 0; i < NUM; i++) {
val[i] = is_mask ? shared[i][lane] : (T)(0.0f);
}
warpReduceSum<T, NUM>(val);
return (T)0.0f;
}
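// Example usage (hypothetical sketch): a block-wide sum of one partial value per thread,
// following the same pattern as the layernorm/groupnorm kernels that include this header.
// After the call, thread 0 holds the block total in sums[0]; the kernel name and buffers
// below are assumptions for illustration only.
//
//   __global__ void row_sum_kernel(const float* x, float* out, int n) {
//     float sums[1] = {0.0f};
//     for (int i = threadIdx.x; i < n; i += blockDim.x)
//       sums[0] += x[blockIdx.x * n + i];
//     if (blockDim.x <= 32) warpReduceSum<float, 1>(sums);
//     else                  blockReduceSum<float, 1>(sums);
//     if (threadIdx.x == 0) out[blockIdx.x] = sums[0];
//   }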
template<typename T, int NUM>
__inline__ __device__ T warpReduceMax(T* val)
{
#pragma unroll
for (int i = 0; i < NUM; i++) {
#pragma unroll
for (int mask = 16; mask > 0; mask >>= 1)
val[i] = max(val[i], __shfl_xor_sync(FINAL_MASK, val[i], mask, 32));
}
return (T)(0.0f);
}
template<typename T, int NUM>
__inline__ __device__ T blockReduceMax(T* val)
{
static __shared__ T shared[32][NUM];
int lane = threadIdx.x & 0x1f; // in-warp idx
int wid = threadIdx.x >> 5; // warp idx
    warpReduceMax<T, NUM>(val);  // get max in each warp
    if (lane == 0)  // record the in-warp max by warp idx
{
#pragma unroll
for (int i = 0; i < NUM; i++) {
shared[wid][i] = val[i];
}
}
__syncthreads();
    // Use blockDim.x / 32.f instead of blockDim.x >> 5 so that block sizes
    // that are not a multiple of 32 are handled correctly
bool is_mask = threadIdx.x < (blockDim.x / 32.f);
#pragma unroll
for (int i = 0; i < NUM; i++) {
val[i] = is_mask ? shared[lane][i] : (T)(-FLT_MAX);
}
warpReduceMax<T, NUM>(val);
return (T)0.0f;
}
| 4,007 | C | 30.3125 | 100 | 0.607687 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/device_layernorm.h | /******************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
/**
* \file
* \brief cuda kernels to do layernorm on a device memory tensor with RowMajor layout.
*/
#include "cutlass/cutlass.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_coord.h"
#include "cutlass/tensor_ref.h"
#include "device_utils.h"
#include <float.h>
namespace cutlass {
/** \brief interface to do layernorm on a device memory tensor with RowMajor layout.
* \tparam T: data type
*/
template <typename T>
void layernorm(cutlass::MatrixCoord tensor_size,
TensorRef<T, layout::RowMajor> ref_output,
TensorRef<T, layout::RowMajor> ref_input,
TensorRef<T, layout::RowMajor> ref_gamma,
TensorRef<T, layout::RowMajor> ref_beta,
cudaStream_t stream);
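// Example usage (hypothetical sketch, not part of the library API): layernorm over the
// last dimension of a row-major [m, n] activation matrix. Names and sizes are
// assumptions for illustration only.
//
//   #include <cutlass/util/host_tensor.h>
//
//   cutlass::MatrixCoord size(4096, 1024);   // m rows, n columns
//   cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor> in(size), out(size);
//   cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor> gamma({1, 1024}), beta({1, 1024});
//
//   cutlass::layernorm<cutlass::half_t>(size, out.device_ref(), in.device_ref(),
//                                       gamma.device_ref(), beta.device_ref(),
//                                       /*stream=*/nullptr);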
/**
* output [m, n] row-major
* input [m, n] row-major
* gamma [n]
* beta [n]
* grid(m)
* block(block_size) -- each block deals with n elements ; each thread deals with ITEM_PER_THREAD elements
*/
template<typename T, int ITEM_PER_THREAD>
__global__ void layernorm_twoPassAlgo_stored_locally_e1(T* output,
const T* input,
const T* gamma,
const T* beta,
const int m,
const int n)
{
const int m_idx = blockIdx.x;
const int tid = threadIdx.x;
const int bdimx = blockDim.x;
__shared__ float s_mean, s_variance;
T local_val[ITEM_PER_THREAD];
float local_sums[1] = {0.0f};
int offset = m_idx * n;
input += offset;
output += offset;
const T zero = T(0.0f);
#pragma unroll
for (int i = 0 ; i < ITEM_PER_THREAD ; i++){
int index = tid + i*bdimx;
local_val[i] = index < n ? input[index] : zero;
local_sums[0] += static_cast<float>(local_val[i]);
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
}
else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_mean = local_sums[0] / n;
}
__syncthreads();
local_sums[0] = 0.0f;
#pragma unroll
for (int i = 0 ; i < ITEM_PER_THREAD ; i++){
int index = tid + i*bdimx;
if (index < n){
const float tmp = static_cast<float>(local_val[i]) - s_mean;
local_sums[0] += tmp * tmp;
}
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
}
else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_variance = rsqrtf(local_sums[0] / n + 1e-5);
}
__syncthreads();
#pragma unroll
for (int i = 0 ; i < ITEM_PER_THREAD ; i++){
int index = tid + i*bdimx;
if (index < n) {
const T gamma_val = gamma[index];
const T beta_val = beta[index];
output[index] = T((static_cast<float>(local_val[i]) - s_mean) * s_variance * static_cast<float>(gamma_val) + static_cast<float>(beta_val));
}
}
}
/**
* output [m, n] row-major
* input [m, n] row-major
* gamma [n]
* beta [n]
* grid(m)
* block(block_size) -- each block deals with block_size*ITEM_PER_THREAD*2 elements;
*/
template<typename T2, typename T, int ITEM_PER_THREAD>
__global__ void layernorm_twoPassAlgo_stored_locally_e2(T2* output,
const T2* input,
const T2* gamma,
const T2* beta,
const int m,
const int n)
{
const int m_idx = blockIdx.x;
const int tid = threadIdx.x;
const int bdimx = blockDim.x;
__shared__ float s_mean, s_variance;
float local_sums[1] = {0.0f};
T2 local_val[ITEM_PER_THREAD];
const int n_2 = n / 2;
int offset = m_idx * n_2;
input += offset;
output += offset;
const T2 zero = {T(0.0f), T(0.0f)};
#pragma unroll
for (int i = 0; i < ITEM_PER_THREAD; i += 1) {
const int index = i*bdimx + tid;
local_val[i] = index < n_2 ? input[index] : zero;
local_sums[0] += static_cast<float>(local_val[i].x) + static_cast<float>(local_val[i].y);
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
}
else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_mean = local_sums[0] / n;
}
__syncthreads();
local_sums[0] = 0.0f;
#pragma unroll
for (int i = 0; i < ITEM_PER_THREAD; i += 1) {
const int index = i*bdimx + tid;
if (index < n_2){
const float2 tmp = {static_cast<float>(local_val[i].x) - s_mean,
static_cast<float>(local_val[i].y) - s_mean};
local_sums[0] += tmp.x * tmp.x + tmp.y * tmp.y;
}
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
}
else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_variance = rsqrtf(local_sums[0] / n + 1e-5);
}
__syncthreads();
#pragma unroll
for (int i = 0; i < ITEM_PER_THREAD; i += 1) {
const int index = i*bdimx + tid;
if (index < n_2){
const T2 gamma_val = gamma[index];
const T2 beta_val = beta[index];
T2 tmp;
tmp.x = T((static_cast<float>(local_val[i].x) - s_mean)*s_variance*static_cast<float>(gamma_val.x) + static_cast<float>(beta_val.x));
tmp.y = T((static_cast<float>(local_val[i].y) - s_mean)*s_variance*static_cast<float>(gamma_val.y) + static_cast<float>(beta_val.y));
output[index] = tmp;
}
}
}
/**
* output [m, n] row-major
* input [m, n] row-major
* gamma [n]
* beta [n]
* grid(m)
* block(block_size) -- each block deals with block_size*ITEM_PER_THREAD*4 elements;
*/
template<typename T4, typename T, int ITEM_PER_THREAD>
__global__ void layernorm_twoPassAlgo_stored_locally_e4(T4* output,
const T4* input,
const T4* gamma,
const T4* beta,
const int m,
const int n)
{
const int m_idx = blockIdx.x;
const int tid = threadIdx.x;
const int bdimx = blockDim.x;
__shared__ float s_mean, s_variance;
float local_sums[1] = {0.0f};
T4 local_val[ITEM_PER_THREAD];
const int n_4 = n / 4;
int offset = m_idx * n_4;
input += offset;
output += offset;
const T4 zero = {T(0.0f), T(0.0f), T(0.0f), T(0.0f)};
#pragma unroll
for (int i = 0; i < ITEM_PER_THREAD; i += 1) {
const int index = i*bdimx + tid;
local_val[i] = index < n_4 ? input[index] : zero;
local_sums[0] += static_cast<float>(local_val[i].x) + static_cast<float>(local_val[i].y) +
static_cast<float>(local_val[i].z) + static_cast<float>(local_val[i].w);
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
}
else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_mean = local_sums[0] / n;
}
__syncthreads();
local_sums[0] = 0.0f;
#pragma unroll
for (int i = 0; i < ITEM_PER_THREAD; i += 1) {
const int index = i*bdimx + tid;
if (index < n_4){
const float4 tmp = {static_cast<float>(local_val[i].x) - s_mean,
static_cast<float>(local_val[i].y) - s_mean,
static_cast<float>(local_val[i].z) - s_mean,
static_cast<float>(local_val[i].w) - s_mean};
local_sums[0] += tmp.x * tmp.x + tmp.y * tmp.y + tmp.z * tmp.z + tmp.w * tmp.w;
}
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
}
else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_variance = rsqrtf(local_sums[0] / n + 1e-5);
}
__syncthreads();
#pragma unroll
for (int i = 0; i < ITEM_PER_THREAD; i += 1) {
const int index = i*bdimx + tid;
if (index < n_4){
const T4 gamma_val = gamma[index];
const T4 beta_val = beta[index];
T4 tmp;
tmp.x = T((static_cast<float>(local_val[i].x) - s_mean)*s_variance*static_cast<float>(gamma_val.x) + static_cast<float>(beta_val.x));
tmp.y = T((static_cast<float>(local_val[i].y) - s_mean)*s_variance*static_cast<float>(gamma_val.y) + static_cast<float>(beta_val.y));
tmp.z = T((static_cast<float>(local_val[i].z) - s_mean)*s_variance*static_cast<float>(gamma_val.z) + static_cast<float>(beta_val.z));
tmp.w = T((static_cast<float>(local_val[i].w) - s_mean)*s_variance*static_cast<float>(gamma_val.w) + static_cast<float>(beta_val.w));
output[index] = tmp;
}
}
}
/**
* output [m, n] row-major
* input [m, n] row-major
* gamma [n]
* beta [n]
* grid(m)
* block(block_size) -- each block deals with n elements ; each thread deals with ITEM_PER_THREAD elements
*/
template<typename T>
__global__ void layernorm_twoPassAlgo_e1(T* output,
const T* input,
const T* gamma,
const T* beta,
const int m,
const int n)
{
const int m_idx = blockIdx.x;
const int tid = threadIdx.x;
const int bdimx = blockDim.x;
__shared__ float s_mean, s_variance;
float local_sums[1] = {0.0f};
int offset = m_idx * n;
input += offset;
output += offset;
for (int index = tid ; index < n ; index += bdimx){
float local_val = static_cast<float>(input[index]);
local_sums[0] += local_val;
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
}
else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_mean = local_sums[0] / n;
}
__syncthreads();
local_sums[0] = 0.0f;
for (int index = tid ; index < n ; index += bdimx){
float local_val = static_cast<float>(input[index]);
local_val = local_val - s_mean;
local_sums[0] += local_val * local_val;
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
}
else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_variance = rsqrtf(local_sums[0] / n + 1e-5);
}
__syncthreads();
for (int index = tid ; index < n ; index += bdimx){
const T gamma_val = gamma[index];
const T beta_val = beta[index];
const T local_val = input[index];
output[index] = T((static_cast<float>(local_val) - s_mean) * s_variance * static_cast<float>(gamma_val) + static_cast<float>(beta_val));
}
}
/**
* output [m, n] row-major
* input [m, n] row-major
* gamma [n]
* beta [n]
* grid(m)
* block(block_size) -- each block deals with block_size*ITEM_PER_THREAD*2 elements;
*/
template<typename T2, typename T>
__global__ void layernorm_twoPassAlgo_e2(T2* output,
const T2* input,
const T2* gamma,
const T2* beta,
const int m,
const int n)
{
const int m_idx = blockIdx.x;
const int tid = threadIdx.x;
const int bdimx = blockDim.x;
__shared__ float s_mean, s_variance;
float local_sums[1] = {0.0f};
const int n_2 = n / 2;
int offset = m_idx * n_2;
input += offset;
output += offset;
for (int index = tid; index < n_2; index += bdimx) {
const T2 local_val = input[index];
local_sums[0] += static_cast<float>(local_val.x) + static_cast<float>(local_val.y);
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
}
else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_mean = local_sums[0] / n;
}
__syncthreads();
local_sums[0] = 0.0f;
for (int index = tid; index < n_2; index += bdimx) {
const T2 local_val = input[index];
const float2 tmp = {static_cast<float>(local_val.x) - s_mean,
static_cast<float>(local_val.y) - s_mean};
local_sums[0] += tmp.x * tmp.x + tmp.y * tmp.y;
}
if (blockDim.x <= 32) {
warpReduceSum<float, 1>(local_sums);
}
else {
blockReduceSum<float, 1>(local_sums);
}
if (threadIdx.x == 0) {
s_variance = rsqrtf(local_sums[0] / n + 1e-5);
}
__syncthreads();
for (int index = tid; index < n_2; index += bdimx) {
const T2 local_val = input[index];
const T2 gamma_val = gamma[index];
const T2 beta_val = beta[index];
T2 tmp;
tmp.x = T((static_cast<float>(local_val.x) - s_mean)*s_variance*static_cast<float>(gamma_val.x) + static_cast<float>(beta_val.x));
tmp.y = T((static_cast<float>(local_val.y) - s_mean)*s_variance*static_cast<float>(gamma_val.y) + static_cast<float>(beta_val.y));
output[index] = tmp;
}
}
template <typename T>
void layernorm(cutlass::MatrixCoord tensor_size,
TensorRef<T, layout::RowMajor> ref_output,
TensorRef<T, layout::RowMajor> ref_input,
TensorRef<T, layout::RowMajor> ref_gamma,
TensorRef<T, layout::RowMajor> ref_beta,
cudaStream_t stream){
const int m = tensor_size.row();
const int n = tensor_size.column();
T* output = ref_output.data();
const T* input = ref_input.data();
const T* gamma = ref_gamma.data();
const T* beta = ref_beta.data();
dim3 grid(m);
dim3 block((n + 31)/32*32);
if (block.x > 1024){
block.x = 1024;
}
  // TODO: There should be better configurations for the different cases; the branches below are only samples that show how to dispatch.
  // TODO: Storing values locally in registers reduces the number of global-memory loads and speeds up the kernels.
  // Note: the block sizes below are rounded up to a multiple of 32 so that every block is a whole number of warps.
if ((n % 4 == 0) && (n >= 128) && (n <= 4096)) {
block.x = (n/4 + 31)/32*32;
if (std::is_same<T, float>::value) {
layernorm_twoPassAlgo_stored_locally_e4<float4, float, 1><<<grid, block, 0, stream>>>(
(float4*)output,
(const float4*)input,
(const float4*)gamma,
(const float4*)beta,
m,
n);
} // if (std::is_same<T, float>::value)
else {
layernorm_twoPassAlgo_stored_locally_e4<half4, half, 1><<<grid, block, 0, stream>>>(
(half4*)output,
(const half4*)input,
(const half4*)gamma,
(const half4*)beta,
m,
n);
}
} //if ((n % 4 == 0) && (n >= 128) && (n <= 4096))
else if (n % 2 == 0) {
if (n / 2 <= 1024) {
block.x = (n/2 + 31)/32*32;
if (std::is_same<T, float>::value) {
layernorm_twoPassAlgo_stored_locally_e2<float2, float, 1><<<grid, block, 0, stream>>>(
(float2*)output,
(const float2*)input,
(const float2*)gamma,
(const float2*)beta,
m,
n);
} //if (std::is_same<T, float>::value)
else {
layernorm_twoPassAlgo_stored_locally_e2<half2, half, 1><<<grid, block, 0, stream>>>(
(half2*)output,
(const half2*)input,
(const half2*)gamma,
(const half2*)beta,
m,
n);
}
} // if (n / 2 <= 1024)
else if (n <= 8192) {
block.x = ((n + 7)/8 + 31)/32*32;
if (std::is_same<T, float>::value) {
layernorm_twoPassAlgo_stored_locally_e2<float2, float, 4><<<grid, block, 0, stream>>>(
(float2*)output,
(const float2*)input,
(const float2*)gamma,
(const float2*)beta,
m,
n);
} // if (std::is_same<T, float>::value)
else {
layernorm_twoPassAlgo_stored_locally_e2<half2, half, 4><<<grid, block, 0, stream>>>(
(half2*)output,
(const half2*)input,
(const half2*)gamma,
(const half2*)beta,
m,
n);
}
} // if (n <= 8192)
else if (n <= 16384) {
block.x = ((n + 15)/ 16 + 31)/32*32;
if (std::is_same<T, float>::value) {
layernorm_twoPassAlgo_stored_locally_e2<float2, float, 8><<<grid, block, 0, stream>>>(
(float2*)output,
(const float2*)input,
(const float2*)gamma,
(const float2*)beta,
m,
n);
} // if (std::is_same<T, float>::value)
else {
layernorm_twoPassAlgo_stored_locally_e2<half2, half, 8><<<grid, block, 0, stream>>>(
(half2*)output,
(const half2*)input,
(const half2*)gamma,
(const half2*)beta,
m,
n);
}
} // if (n <= 16384)
else if (n <= 32768) {
block.x = ((n + 31)/32 + 31)/32*32;
if (std::is_same<T, float>::value) {
layernorm_twoPassAlgo_stored_locally_e2<float2, float, 16><<<grid, block, 0, stream>>>(
(float2*)output,
(const float2*)input,
(const float2*)gamma,
(const float2*)beta,
m,
n);
} // if (std::is_same<T, float>::value)
else {
layernorm_twoPassAlgo_stored_locally_e2<half2, half, 16><<<grid, block, 0, stream>>>(
(half2*)output,
(const half2*)input,
(const half2*)gamma,
(const half2*)beta,
m,
n);
}
} // if (n <= 32768)
else {
if (block.x > 512)
block.x = 512;
if (std::is_same<T, float>::value) {
layernorm_twoPassAlgo_e2<float2, float><<<grid, block, 0, stream>>>(
(float2 *)output,
(const float2 *)input,
(const float2 *)gamma,
(const float2 *)beta,
m,
n);
} // if (std::is_same<T, float>::value)
else {
layernorm_twoPassAlgo_e2<half2, half><<<grid, block, 0, stream>>>(
(half2 *)output,
(const half2 *)input,
(const half2 *)gamma,
(const half2 *)beta,
m,
n);
}
}
} // if (n % 2 == 0)
else {
if (n <= 1024) {
layernorm_twoPassAlgo_stored_locally_e1<T, 1><<<grid, block, 0, stream>>>(
output,
input,
gamma,
beta,
m,
n);
} // if (n <= 1024)
else if (n <= 8192) {
block.x = ((n + 7)/8 + 31)/32*32;
layernorm_twoPassAlgo_stored_locally_e1<T, 8><<<grid, block, 0, stream>>>(
output,
input,
gamma,
beta,
m,
n);
} // if (n <= 8192)
else if (n <= 16384) {
      block.x = ((n + 15)/16 + 31)/32*32;
layernorm_twoPassAlgo_stored_locally_e1<T, 16><<<grid, block, 0, stream>>>(
output,
input,
gamma,
beta,
m,
n);
} // if (n <= 16384)
else if (n <= 32768) {
block.x = ((n + 31)/32 + 31)/32*32;
layernorm_twoPassAlgo_stored_locally_e1<T, 32><<<grid, block, 0, stream>>>(
output,
input,
gamma,
beta,
m,
n);
} // if (n <= 32768)
else{
if (block.x > 512) {
block.x = 512;
}
layernorm_twoPassAlgo_e1<<<grid, block, 0, stream>>>(
output,
input,
gamma,
beta,
m,
n);
}
}
}
} //namespace cutlass
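// Illustrative usage sketch (not part of the original header): shows how the layernorm()
// dispatch above might be invoked for a float [m, n] row-major problem. The pointer arguments
// are hypothetical caller-owned device allocations of m*n elements (output/input) and n
// elements (gamma/beta); only types already used by this header are referenced.
inline void example_layernorm_fp32(float *d_output,
                                   float *d_input,
                                   float *d_gamma,
                                   float *d_beta,
                                   int m, int n,
                                   cudaStream_t stream) {
  cutlass::layout::RowMajor layout(n);  // leading dimension of a packed [m, n] row-major matrix
  cutlass::TensorRef<float, cutlass::layout::RowMajor> ref_output(d_output, layout);
  cutlass::TensorRef<float, cutlass::layout::RowMajor> ref_input(d_input, layout);
  cutlass::TensorRef<float, cutlass::layout::RowMajor> ref_gamma(d_gamma, layout);
  cutlass::TensorRef<float, cutlass::layout::RowMajor> ref_beta(d_beta, layout);
  // Dispatches to one of the kernels above based on n (vector width and items per thread).
  cutlass::layernorm(cutlass::MatrixCoord(m, n), ref_output, ref_input, ref_gamma, ref_beta, stream);
}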
| 20,880 | C | 31.373643 | 145 | 0.536159 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/index_sequence.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
// integer_sequence moved to cutlass/numeric_types.h
| 1,962 | C | 49.333332 | 100 | 0.695209 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/command_line.h | /******************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
/**
* \file
* Utility for parsing command line arguments
*/
#include <iostream>
#include <limits>
#include <sstream>
#include <string>
#include <vector>
#include <cuda_runtime.h>
#include "cutlass/cutlass.h"
namespace cutlass {
/******************************************************************************
* command_line
******************************************************************************/
/**
* Utility for parsing command line arguments
*/
struct CommandLine {
std::vector<std::string> keys;
std::vector<std::string> values;
std::vector<std::string> args;
/**
* Constructor
*/
CommandLine(int argc, const char** argv) {
using namespace std;
for (int i = 1; i < argc; i++) {
string arg = argv[i];
if ((arg[0] != '-') || (arg[1] != '-')) {
args.push_back(arg);
continue;
}
string::size_type pos;
string key, val;
if ((pos = arg.find('=')) == string::npos) {
key = string(arg, 2, arg.length() - 2);
val = "";
} else {
key = string(arg, 2, pos - 2);
val = string(arg, pos + 1, arg.length() - 1);
}
keys.push_back(key);
values.push_back(val);
}
}
/**
* Checks whether a flag "--<flag>" is present in the commandline
*/
bool check_cmd_line_flag(const char* arg_name) const {
using namespace std;
for (int i = 0; i < int(keys.size()); ++i) {
if (keys[i] == string(arg_name)) return true;
}
return false;
}
/**
* Returns number of naked (non-flag and non-key-value) commandline parameters
*/
size_t num_naked_args() const {
return args.size();
}
/**
* Print naked (non-flag and non-key-value) commandline parameters
*/
void print_naked_args(std::ostream &out) const {
for (auto arg : args) {
out << " " << arg <<"\n";
}
}
/**
* Returns the commandline parameter for a given index (not including flags)
*/
template <typename value_t>
void get_cmd_line_argument(int index, value_t& val) const {
using namespace std;
if (index < args.size()) {
istringstream str_stream(args[index]);
str_stream >> val;
}
}
/**
* Obtains the boolean value specified for a given commandline parameter --<flag>=<bool>
*/
void get_cmd_line_argument(const char* arg_name, bool& val, bool _default) const {
val = _default;
if (check_cmd_line_flag(arg_name)) {
std::string value;
get_cmd_line_argument(arg_name, value);
val = !(value == "0" || value == "false");
}
}
/**
* Obtains the value specified for a given commandline parameter --<flag>=<value>
*/
template <typename value_t>
void get_cmd_line_argument(const char* arg_name,
value_t& val) const {
get_cmd_line_argument(arg_name, val, val);
}
/**
* Obtains the value specified for a given commandline parameter --<flag>=<value>
*/
template <typename value_t>
void get_cmd_line_argument(const char* arg_name,
value_t& val,
value_t const& _default) const {
using namespace std;
val = _default;
for (int i = 0; i < int(keys.size()); ++i) {
if (keys[i] == string(arg_name)) {
istringstream str_stream(values[i]);
str_stream >> val;
}
}
}
/**
* Returns the values specified for a given commandline parameter --<flag>=<value>,<value>*
*/
template <typename value_t>
void get_cmd_line_arguments(const char* arg_name,
std::vector<value_t>& vals,
char sep = ',') const {
using namespace std;
if (check_cmd_line_flag(arg_name)) {
// Clear any default values
vals.clear();
// Recover from multi-value string
for (int i = 0; i < keys.size(); ++i) {
if (keys[i] == string(arg_name)) {
string val_string(values[i]);
seperate_string(val_string, vals, sep);
}
}
}
}
/**
   * Returns the key:value pairs specified for a given commandline parameter
   * --<flag>=<key:value>,<key:value>*
*/
void get_cmd_line_argument_pairs(const char* arg_name,
std::vector<std::pair<std::string, std::string> >& tokens,
char delim = ',',
char sep = ':') const {
if (check_cmd_line_flag(arg_name)) {
std::string value;
get_cmd_line_argument(arg_name, value);
tokenize(tokens, value, delim, sep);
}
}
/**
* Returns a list of ranges specified for a given commandline parameter
   * --<flag>=<value_start:value_end>,<value_start:value_end>*
*/
void get_cmd_line_argument_ranges(const char* arg_name,
std::vector<std::vector<std::string> >& vals,
char delim = ',',
char sep = ':') const {
std::vector<std::string> ranges;
get_cmd_line_arguments(arg_name, ranges, delim);
for (std::vector<std::string>::const_iterator range = ranges.begin();
range != ranges.end(); ++range) {
std::vector<std::string> range_vals;
seperate_string(*range, range_vals, sep);
vals.push_back(range_vals);
}
}
/**
* The number of pairs parsed
*/
int parsed_argc() const { return (int)keys.size(); }
//-------------------------------------------------------------------------
// Utility functions
//-------------------------------------------------------------------------
/// Tokenizes a comma-delimited list of string pairs delimited by ':'
static void tokenize(std::vector<std::pair<std::string, std::string> >& tokens,
std::string const& str,
char delim = ',',
char sep = ':') {
// Home-built to avoid Boost dependency
size_t s_idx = 0;
size_t d_idx = std::string::npos;
while (s_idx < str.size()) {
d_idx = str.find_first_of(delim, s_idx);
size_t end_idx = (d_idx != std::string::npos ? d_idx : str.size());
size_t sep_idx = str.find_first_of(sep, s_idx);
size_t offset = 1;
if (sep_idx == std::string::npos || sep_idx >= end_idx) {
sep_idx = end_idx;
offset = 0;
}
std::pair<std::string, std::string> item(
str.substr(s_idx, sep_idx - s_idx),
str.substr(sep_idx + offset, end_idx - sep_idx - offset));
tokens.push_back(item);
s_idx = end_idx + 1;
}
}
  /// Tokenizes a comma-delimited list of ':'-delimited string pairs, keeping only the first element of each pair
static void tokenize(std::vector<std::string>& tokens,
std::string const& str,
char delim = ',',
char sep = ':') {
typedef std::vector<std::pair<std::string, std::string> > TokenVector;
typedef TokenVector::const_iterator token_iterator;
std::vector<std::pair<std::string, std::string> > token_pairs;
tokenize(token_pairs, str, delim, sep);
for (token_iterator tok = token_pairs.begin(); tok != token_pairs.end(); ++tok) {
tokens.push_back(tok->first);
}
}
template <typename value_t>
static void seperate_string(std::string const& str,
std::vector<value_t>& vals,
char sep = ',') {
std::istringstream str_stream(str);
std::string::size_type old_pos = 0;
std::string::size_type new_pos = 0;
// Iterate <sep>-delimited values
value_t val;
while ((new_pos = str.find(sep, old_pos)) != std::string::npos) {
if (new_pos != old_pos) {
str_stream.width(new_pos - old_pos);
str_stream >> val;
vals.push_back(val);
}
// skip over delimiter
str_stream.ignore(1);
old_pos = new_pos + 1;
}
// Read last value
str_stream >> val;
vals.push_back(val);
}
};
} // namespace cutlass
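/******************************************************************************
 * Illustrative usage sketch (not part of the original header): parses a few
 * hypothetical flags such as --m=128 --alpha=2.5 --modes=a,b,c --verbose.
 ******************************************************************************/
inline void example_command_line_usage(int argc, const char **argv) {
  cutlass::CommandLine cmd(argc, argv);
  int m = 64;                                        // default kept when --m is absent
  cmd.get_cmd_line_argument("m", m);
  float alpha;
  cmd.get_cmd_line_argument("alpha", alpha, 1.0f);   // explicit default value
  std::vector<std::string> modes;                    // filled from --modes=a,b,c
  cmd.get_cmd_line_arguments("modes", modes);
  bool verbose = cmd.check_cmd_line_flag("verbose");
  (void)m; (void)alpha; (void)modes; (void)verbose;  // silence unused warnings in this sketch
}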
| 9,774 | C | 30.130573 | 93 | 0.557602 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/host_tensor.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
/*! \file
    \brief HostTensor provides management for both host and device memory.
HostTensor allocates host and device memory upon construction. Basic element-wise operations on
host memory synchronize device memory automatically. Explicit copy operations provide abstractions
for CUDA memcpy operations.
Call {host, device}_{data, ref, view}() for accessing host or device memory.
See cutlass/tensor_ref.h and cutlass/tensor_view.h for more details.
*/
#include <vector>
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "device_memory.h"
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Host tensor
template <
/// Data type of element stored within tensor (concept: NumericType)
typename Element_,
/// Defines a mapping from logical coordinate to linear memory (concept: Layout)
typename Layout_
>
class HostTensor {
public:
/// Data type of individual access
using Element = Element_;
/// Mapping function from logical coordinate to linear memory
using Layout = Layout_;
/// Logical rank of tensor index space
static int const kRank = Layout::kRank;
/// Index type
using Index = typename Layout::Index;
/// Long index used for pointer offsets
using LongIndex = typename Layout::LongIndex;
/// Coordinate in logical tensor space
using TensorCoord = typename Layout::TensorCoord;
/// Layout's stride vector
using Stride = typename Layout::Stride;
/// Tensor reference to device memory
using TensorRef = TensorRef<Element, Layout>;
/// Tensor reference to constant device memory
using ConstTensorRef = typename TensorRef::ConstTensorRef;
/// Tensor reference to device memory
using TensorView = TensorView<Element, Layout>;
/// Tensor reference to constant device memory
using ConstTensorView = typename TensorView::ConstTensorView;
/// Reference to element in tensor
using Reference = typename TensorRef::Reference;
/// Constant reference to element in tensor
using ConstReference = typename ConstTensorRef::Reference;
/// Used to handle packing of subbyte elements
static int const kElementsPerStoredItem = (sizeof_bits<Element>::value < 8 ? (8 / sizeof_bits<Element>::value) : 1);
private:
//
// Data members
//
/// Extent of tensor in logical dimensions
TensorCoord extent_;
/// Layout object
Layout layout_;
/// Host-side memory allocation
std::vector<Element> host_;
/// Device-side memory
device_memory::allocation<Element> device_;
public:
//
// Device and Host Methods
//
/// Default constructor
HostTensor() {}
/// Constructs a tensor given an extent. Assumes a packed layout
HostTensor(
TensorCoord const &extent,
bool device_backed = true
) {
this->reset(extent, Layout::packed(extent), device_backed);
}
/// Constructs a tensor given an extent and layout
HostTensor(
TensorCoord const &extent,
Layout const &layout,
bool device_backed = true
) {
this->reset(extent, layout, device_backed);
}
~HostTensor() { }
/// Clears the HostTensor allocation to size/capacity = 0
void reset() {
extent_ = TensorCoord();
layout_ = Layout::packed(extent_);
host_.clear();
device_.reset();
}
/// Resizes internal memory allocations without affecting layout or extent
void reserve(
size_t count, ///< size of tensor in elements
bool device_backed_ = true) { ///< if true, device memory is also allocated
device_.reset();
host_.clear();
count /= kElementsPerStoredItem;
host_.resize(count);
// Allocate memory
Element* device_memory = nullptr;
if (device_backed_) {
device_memory = device_memory::allocate<Element>(count);
}
device_.reset(device_memory, device_backed_ ? count : 0);
}
/// Updates the extent and layout of the HostTensor. Allocates memory according to the new
/// extent and layout.
void reset(
TensorCoord const &extent, ///< extent of logical tensor
Layout const &layout, ///< layout object of tensor
bool device_backed_ = true) { ///< if true, device memory is also allocated.
extent_ = extent;
layout_ = layout;
reserve(size_t(layout_.capacity(extent_)), device_backed_);
}
/// Updates the extent and layout of the HostTensor. Allocates memory according to the new
/// extent and layout. Assumes a packed tensor configuration.
void reset(
TensorCoord const &extent, ///< extent of logical tensor
bool device_backed_ = true) { ///< if true, device memory is also allocated.
reset(extent, Layout::packed(extent), device_backed_);
}
/// Changes the size of the logical tensor. Only allocates memory if new capacity exceeds reserved capacity.
/// To force allocation, call reset().
void resize(
TensorCoord const &extent, ///< extent of logical tensor
Layout const &layout, ///< layout object of tensor
bool device_backed_ = true) { ///< if true, device memory is also allocated.
extent_ = extent;
layout_ = layout;
LongIndex new_size = size_t(layout_.capacity(extent_));
if (static_cast<decltype(host_.size())>(new_size) > host_.size()) {
reserve(new_size);
}
}
/// Changes the size of the logical tensor. Only allocates memory if new capacity exceeds reserved capacity.
/// To force allocation, call reset(). Note, this form of resize() assumes a packed tensor configuration.
void resize(
TensorCoord const &extent, ///< extent of logical tensor
bool device_backed_ = true) { ///< if true, device memory is also allocated.
resize(extent, Layout::packed(extent), device_backed_);
}
/// Returns the number of elements stored in the host tensor
size_t size() const {
return host_.size() * kElementsPerStoredItem;
}
/// Returns the logical capacity based on extent and layout. May differ from size().
LongIndex capacity() const {
return layout_.capacity(extent_);
}
/// Gets pointer to host data
Element * host_data() { return host_.data(); }
/// Gets pointer to host data with a pointer offset
Element * host_data_ptr_offset(LongIndex ptr_element_offset) { return &ReferenceFactory<Element>::get(host_.data(), ptr_element_offset); }
/// Gets a reference to an element in host memory
Reference host_data(LongIndex idx) {
return ReferenceFactory<Element>::get(host_data(), idx);
}
/// Gets pointer to host data
Element const * host_data() const { return host_.data(); }
/// Gets a constant reference to an element in host memory
ConstReference host_data(LongIndex idx) const {
return ReferenceFactory<Element const>::get(host_data(), idx);
}
/// Gets pointer to device data
Element * device_data() { return device_.get(); }
/// Gets pointer to device data with a pointer offset
Element * device_data_ptr_offset(LongIndex ptr_element_offset) { return &ReferenceFactory<Element>::get(device_data(), ptr_element_offset); }
/// Gets pointer to device data
Element const * device_data() const { return device_.get(); }
/// Accesses the tensor reference pointing to data
TensorRef host_ref(LongIndex ptr_element_offset=0) { return TensorRef(host_data_ptr_offset(ptr_element_offset), layout_); }
/// Accesses the tensor reference pointing to data
ConstTensorRef host_ref(LongIndex ptr_element_offset=0) const { return ConstTensorRef(host_data_ptr_offset(ptr_element_offset), layout_); }
/// Accesses the tensor reference pointing to data
TensorRef device_ref(LongIndex ptr_element_offset=0) {
return TensorRef(device_data_ptr_offset(ptr_element_offset), layout_);
}
/// Accesses the tensor reference pointing to data
ConstTensorRef device_ref(LongIndex ptr_element_offset=0) const {
return TensorRef(device_data_ptr_offset(ptr_element_offset), layout_);
}
/// Accesses the tensor reference pointing to data
TensorView host_view(LongIndex ptr_element_offset=0) {
return TensorView(host_data_ptr_offset(ptr_element_offset), layout_, extent_);
}
/// Accesses the tensor reference pointing to data
ConstTensorView host_view(LongIndex ptr_element_offset=0) const {
return ConstTensorView(host_data_ptr_offset(ptr_element_offset), layout_, extent_);
}
/// Accesses the tensor reference pointing to data
TensorView device_view(LongIndex ptr_element_offset=0) {
return TensorView(device_data_ptr_offset(ptr_element_offset), layout_, extent_);
}
/// Accesses the tensor reference pointing to data
ConstTensorView device_view(LongIndex ptr_element_offset=0) const {
return ConstTensorView(device_data_ptr_offset(ptr_element_offset), layout_, extent_);
}
/// Returns true if device memory is allocated
bool device_backed() const {
return (device_.get() == nullptr) ? false : true;
}
/// Returns the layout object
Layout & layout() {
return layout_;
}
/// Returns the layout object
Layout layout() const {
return layout_;
}
/// Returns the layout object's stride vector
Stride stride() const {
return layout_.stride();
}
/// Returns the layout object's stride vector
Stride & stride() {
return layout_.stride();
}
/// Returns the layout object's stride in a given physical dimension
LongIndex stride(int dim) const {
return layout_.stride().at(dim);
}
/// Returns the layout object's stride in a given physical dimension
LongIndex & stride(int dim) {
return layout_.stride().at(dim);
}
/// Computes the offset of an index from the origin of the tensor
LongIndex offset(TensorCoord const& coord) const {
return layout_(coord);
}
/// Returns a reference to the element at the logical Coord in host memory
Reference at(TensorCoord const& coord) {
return host_data(offset(coord));
}
/// Returns a const reference to the element at the logical Coord in host memory
ConstReference at(TensorCoord const& coord) const {
return host_data(offset(coord));
}
/// Returns the extent of the tensor
TensorCoord extent() const {
return extent_;
}
/// Returns the extent of the tensor
TensorCoord & extent() {
return extent_;
}
/// Copies data from device to host
void sync_host() {
if (device_backed()) {
device_memory::copy_to_host(
host_data(), device_data(), size());
}
}
/// Copies data from host to device
void sync_device() {
if (device_backed()) {
device_memory::copy_to_device(
device_data(), host_data(), size());
}
}
/// Copy data from a caller-supplied device pointer into host memory.
void copy_in_device_to_host(
Element const* ptr_device, ///< source device memory
LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_to_host(
host_data(), ptr_device, count);
}
  /// Copy data from a caller-supplied device pointer into device memory.
void copy_in_device_to_device(
Element const* ptr_device, ///< source device memory
LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_device_to_device(
device_data(), ptr_device, count);
}
  /// Copy data from a caller-supplied host pointer into device memory.
void copy_in_host_to_device(
Element const* ptr_host, ///< source host memory
LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_to_device(
device_data(), ptr_host, count);
}
  /// Copy data from a caller-supplied host pointer into host memory.
void copy_in_host_to_host(
Element const* ptr_host, ///< source host memory
LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_host_to_host(
host_data(), ptr_host, count);
}
  /// Copy data from device memory into a caller-supplied host pointer.
  void copy_out_device_to_host(
    Element * ptr_host,                  ///< destination host memory
LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_to_host(
ptr_host, device_data(), count);
}
  /// Copy data from device memory into a caller-supplied device pointer.
  void copy_out_device_to_device(
    Element * ptr_device,                ///< destination device memory
LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_device_to_device(
ptr_device, device_data(), count);
}
  /// Copy data from host memory into a caller-supplied device pointer.
  void copy_out_host_to_device(
    Element * ptr_device,                ///< destination device memory
LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_to_device(
ptr_device, host_data(), count);
}
  /// Copy data from host memory into a caller-supplied host pointer.
  void copy_out_host_to_host(
    Element * ptr_host,                  ///< destination host memory
LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_host_to_host(
ptr_host, host_data(), count);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
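///////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (not part of the original header): allocates a small column-major
// tensor, fills it on the host, and mirrors it to device memory. Element type, layout, and
// extents are hypothetical; cutlass/layout/matrix.h is assumed to be included by the caller.
inline void example_host_tensor_usage() {
  cutlass::HostTensor<float, cutlass::layout::ColumnMajor> tensor({16, 8});
  for (int r = 0; r < tensor.extent().row(); ++r) {
    for (int c = 0; c < tensor.extent().column(); ++c) {
      tensor.at({r, c}) = float(r * 8 + c);   // element-wise writes touch host memory only
    }
  }
  tensor.sync_device();                       // explicit host -> device copy
  float *d_ptr = tensor.device_data();        // raw device pointer for kernel launches
  (void)d_ptr;
  tensor.sync_host();                         // device -> host copy once kernels have finished
}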
| 16,745 | C | 31.964567 | 143 | 0.648731 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/host_tensor_planar_complex.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
/*! \file
    \brief HostTensorPlanarComplex provides management for both host and device memory of a planar-complex tensor.
    HostTensorPlanarComplex allocates host and device memory upon construction. Basic element-wise operations on
host memory synchronize device memory automatically. Explicit copy operations provide abstractions
for CUDA memcpy operations.
Call {host, device}_{data, ref, view}() for accessing host or device memory.
See cutlass/tensor_ref.h and cutlass/tensor_view.h for more details.
*/
#include <vector>
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref_planar_complex.h"
#include "cutlass/tensor_view_planar_complex.h"
#include "device_memory.h"
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Host tensor
template <
/// Data type of element stored within tensor (concept: NumericType)
typename Element_,
/// Defines a mapping from logical coordinate to linear memory (concept: Layout)
typename Layout_
>
class HostTensorPlanarComplex {
public:
/// Data type of individual access
using Element = Element_;
/// Mapping function from logical coordinate to linear memory
using Layout = Layout_;
/// Logical rank of tensor index space
static int const kRank = Layout::kRank;
/// Index type
using Index = typename Layout::Index;
/// Long index used for pointer offsets
using LongIndex = typename Layout::LongIndex;
/// Coordinate in logical tensor space
using TensorCoord = typename Layout::TensorCoord;
/// Layout's stride vector
using Stride = typename Layout::Stride;
/// Tensor reference to device memory
using TensorRef = TensorRefPlanarComplex<Element, Layout>;
/// Tensor reference to constant device memory
using ConstTensorRef = typename TensorRef::ConstTensorRef;
/// Tensor reference to device memory
using TensorView = TensorViewPlanarComplex<Element, Layout>;
/// Tensor reference to constant device memory
using ConstTensorView = typename TensorView::ConstTensorView;
/// Reference to element in tensor
using Reference = typename TensorRef::Reference;
/// Constant reference to element in tensor
using ConstReference = typename ConstTensorRef::Reference;
private:
//
// Data members
//
/// Extent of tensor in logical dimensions
TensorCoord extent_;
/// Layout object
Layout layout_;
/// Host-side memory allocation
std::vector<Element> host_;
/// Device-side memory
device_memory::allocation<Element> device_;
public:
//
// Device and Host Methods
//
/// Default constructor
HostTensorPlanarComplex() {}
/// Constructs a tensor given an extent. Assumes a packed layout
HostTensorPlanarComplex(
TensorCoord const &extent,
bool device_backed = true
) {
this->reset(extent, Layout::packed(extent), device_backed);
}
/// Constructs a tensor given an extent and layout
HostTensorPlanarComplex(
TensorCoord const &extent,
Layout const &layout,
bool device_backed = true
) {
this->reset(extent, layout, device_backed);
}
~HostTensorPlanarComplex() { }
/// Clears the HostTensor allocation to size/capacity = 0
void reset() {
extent_ = TensorCoord();
layout_ = Layout::packed(extent_);
host_.clear();
device_.reset();
}
/// Resizes internal memory allocations without affecting layout or extent
void reserve(
size_t count, ///< size of tensor in elements
bool device_backed_ = true) { ///< if true, device memory is also allocated
device_.reset();
host_.clear();
host_.resize(count * 2);
// Allocate memory
Element* device_memory = nullptr;
if (device_backed_) {
device_memory = device_memory::allocate<Element>(count * 2);
}
device_.reset(device_memory, device_backed_ ? count * 2 : 0);
}
/// Updates the extent and layout of the HostTensor. Allocates memory according to the new
/// extent and layout.
void reset(
TensorCoord const &extent, ///< extent of logical tensor
Layout const &layout, ///< layout object of tensor
bool device_backed_ = true) { ///< if true, device memory is also allocated.
extent_ = extent;
layout_ = layout;
reserve(size_t(layout_.capacity(extent_)), device_backed_);
}
/// Updates the extent and layout of the HostTensor. Allocates memory according to the new
/// extent and layout. Assumes a packed tensor configuration.
void reset(
TensorCoord const &extent, ///< extent of logical tensor
bool device_backed_ = true) { ///< if true, device memory is also allocated.
reset(extent, Layout::packed(extent), device_backed_);
}
/// Changes the size of the logical tensor. Only allocates memory if new capacity exceeds reserved capacity.
/// To force allocation, call reset().
void resize(
TensorCoord const &extent, ///< extent of logical tensor
Layout const &layout, ///< layout object of tensor
bool device_backed_ = true) { ///< if true, device memory is also allocated.
extent_ = extent;
layout_ = layout;
LongIndex new_size = size_t(layout_.capacity(extent_));
if (static_cast<decltype(host_.size())>(new_size * 2) > host_.size()) {
reserve(new_size);
}
}
/// Changes the size of the logical tensor. Only allocates memory if new capacity exceeds reserved capacity.
/// To force allocation, call reset(). Note, this form of resize() assumes a packed tensor configuration.
void resize(
TensorCoord const &extent, ///< extent of logical tensor
bool device_backed_ = true) { ///< if true, device memory is also allocated.
resize(extent, Layout::packed(extent), device_backed_);
}
/// Returns the number of elements stored in the host tensor
size_t size() const {
return host_.size() / 2;
}
/// Returns the logical capacity based on extent and layout. May differ from size().
LongIndex capacity() const {
return layout_.capacity(extent_);
}
/// Stride between real and imaginary parts
LongIndex imaginary_stride() const {
return host_.size() / 2;
}
/// Gets pointer to host data
Element * host_data() { return host_.data(); }
/// Gets pointer to host data imaginary part
Element * host_data_imag() { return host_.data() + imaginary_stride(); }
/// Gets pointer to host data with a pointer offset
Element * host_data_ptr_offset(LongIndex ptr_element_offset) { return host_data() + ptr_element_offset; }
/// Gets pointer to host data with a pointer offset
Element * host_data_imag_ptr_offset(LongIndex ptr_element_offset) { return host_data_imag() + ptr_element_offset; }
/// Gets a reference to an element in host memory
Reference host_data(LongIndex idx) {
return PlanarComplexReference<Element>(host_data() + idx, host_data_imag() + idx);
}
/// Gets pointer to host data
Element const * host_data() const { return host_.data(); }
/// Gets pointer to host data imaginary part
Element const * host_data_imag() const { return host_.data() + imaginary_stride(); }
/// Gets a constant reference to an element in host memory
ConstReference host_data(LongIndex idx) const {
return PlanarComplexReference<Element const>(host_data() + idx, host_data_imag() + idx);
}
/// Gets pointer to device data
Element * device_data() { return device_.get(); }
/// Gets pointer to device data with a pointer offset
Element * device_data_ptr_offset(LongIndex ptr_element_offset) { return device_.get() + ptr_element_offset; }
/// Gets pointer to device data
Element const * device_data() const { return device_.get(); }
/// Gets pointer to device data with a pointer offset
Element const * device_data_ptr_offset(LongIndex ptr_element_offset) const { return device_.get() + ptr_element_offset; }
/// Gets a pointer to the device data imaginary part
Element * device_data_imag() { return device_.get() + imaginary_stride(); }
/// Accesses the tensor reference pointing to data
TensorRef host_ref(LongIndex ptr_element_offset=0) {
return TensorRef(host_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride());
}
/// Returns a tensor reference to the real part of the tensor
cutlass::TensorRef<Element, Layout> host_ref_real() {
return cutlass::TensorRef<Element, Layout>(host_data(), layout_);
}
/// Returns a tensor reference to the real part of the tensor
cutlass::TensorRef<Element, Layout> host_ref_imag() {
return cutlass::TensorRef<Element, Layout>(host_data_ptr_offset(imaginary_stride()), layout_);
}
/// Accesses the tensor reference pointing to data
ConstTensorRef host_ref(LongIndex ptr_element_offset=0) const {
return ConstTensorRef(host_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride());
}
/// Accesses the tensor reference pointing to data
TensorRef device_ref(LongIndex ptr_element_offset=0) {
return TensorRef(device_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride());
}
/// Accesses the tensor reference pointing to data
ConstTensorRef device_ref(LongIndex ptr_element_offset=0) const {
return TensorRef(device_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride());
}
/// Returns a tensor reference to the real part of the tensor
cutlass::TensorRef<Element, Layout> device_ref_real() {
return cutlass::TensorRef<Element, Layout>(device_data(), layout_);
}
/// Returns a tensor reference to the real part of the tensor
cutlass::TensorRef<Element, Layout> device_ref_imag() {
return cutlass::TensorRef<Element, Layout>(device_data_ptr_offset(imaginary_stride()), layout_);
}
/// Accesses the tensor reference pointing to data
TensorView host_view(LongIndex ptr_element_offset=0) {
return TensorView(host_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride(), extent_);
}
/// Accesses the tensor reference pointing to data
ConstTensorView host_view(LongIndex ptr_element_offset=0) const {
return ConstTensorView(host_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride(), extent_);
}
/// Accesses the tensor reference pointing to data
cutlass::TensorView<Element, Layout> host_view_real() {
return cutlass::TensorView<Element, Layout>(host_data(), layout_, extent_);
}
/// Accesses the tensor reference pointing to data
cutlass::TensorView<Element, Layout> host_view_imag() {
return cutlass::TensorView<Element, Layout>(host_data_ptr_offset(imaginary_stride()), layout_, extent_);
}
/// Accesses the tensor reference pointing to data
TensorView device_view(LongIndex ptr_element_offset=0) {
return TensorView(device_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride(), extent_);
}
/// Accesses the tensor reference pointing to data
ConstTensorView device_view(LongIndex ptr_element_offset=0) const {
return ConstTensorView(device_data_ptr_offset(ptr_element_offset), layout_, imaginary_stride(), extent_);
}
/// Accesses the tensor reference pointing to data
cutlass::TensorView<Element, Layout> device_view_real() {
return cutlass::TensorView<Element, Layout>(device_data(), layout_, extent_);
}
/// Accesses the tensor reference pointing to data
cutlass::TensorView<Element, Layout> device_view_imag() {
return cutlass::TensorView<Element, Layout>(device_data_ptr_offset(imaginary_stride()), layout_, extent_);
}
/// Returns true if device memory is allocated
bool device_backed() const {
return (device_.get() == nullptr) ? false : true;
}
/// Returns the layout object
Layout layout() const {
return layout_;
}
/// Returns the layout object's stride vector
Stride stride() const {
return layout_.stride();
}
/// Returns the layout object's stride in a given physical dimension
Index stride(int dim) const {
return layout_.stride().at(dim);
}
/// Computes the offset of an index from the origin of the tensor
LongIndex offset(TensorCoord const& coord) const {
return layout_(coord);
}
/// Returns a reference to the element at the logical Coord in host memory
Reference at(TensorCoord const& coord) {
return host_data(offset(coord));
}
/// Returns a const reference to the element at the logical Coord in host memory
ConstReference at(TensorCoord const& coord) const {
return host_data(offset(coord));
}
/// Returns the extent of the tensor
TensorCoord extent() const {
return extent_;
}
/// Returns the extent of the tensor
TensorCoord & extent() {
return extent_;
}
/// Copies data from device to host
void sync_host() {
if (device_backed()) {
device_memory::copy_to_host(
host_data(), device_data(), imaginary_stride() * 2);
}
}
/// Copies data from host to device
void sync_device() {
if (device_backed()) {
device_memory::copy_to_device(
device_data(), host_data(), imaginary_stride() * 2);
}
}
/// Copy data from a caller-supplied device pointer into host memory.
void copy_in_device_to_host(
Element const* ptr_device_real, ///< source device memory
Element const* ptr_device_imag, ///< source device memory
LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_to_host(
host_data(), ptr_device_real, count);
device_memory::copy_to_host(
host_data_imag(), ptr_device_imag, count);
}
  /// Copy data from caller-supplied device pointers into device memory.
void copy_in_device_to_device(
Element const* ptr_device_real, ///< source device memory
Element const* ptr_device_imag, ///< source device memory
LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_device_to_device(
device_data(), ptr_device_real, count);
device_memory::copy_device_to_device(
device_data_imag(), ptr_device_imag, count);
}
  /// Copy data from caller-supplied host pointers into device memory.
void copy_in_host_to_device(
Element const* ptr_host_real, ///< source host memory
Element const* ptr_host_imag, ///< source host memory
LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_to_device(
device_data(), ptr_host_real, count);
device_memory::copy_to_device(
device_data_imag(), ptr_host_imag, count);
}
  /// Copy data from caller-supplied host pointers into host memory.
void copy_in_host_to_host(
Element const* ptr_host_real, ///< source host memory
Element const* ptr_host_imag, ///< source host memory
LongIndex count = -1) { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_host_to_host(
host_data(), ptr_host_real, count);
device_memory::copy_host_to_host(
host_data_imag(), ptr_host_imag, count);
}
  /// Copy data from device memory into caller-supplied host pointers.
  void copy_out_device_to_host(
    Element * ptr_host_real,             ///< destination host memory (real part)
    Element * ptr_host_imag,             ///< destination host memory (imaginary part)
LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_to_host(
ptr_host_real, device_data(), count);
device_memory::copy_to_host(
ptr_host_imag, device_data_imag(), count);
}
  /// Copy data from device memory into caller-supplied device pointers.
  void copy_out_device_to_device(
    Element * ptr_device_real,           ///< destination device memory (real part)
    Element * ptr_device_imag,           ///< destination device memory (imaginary part)
LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_device_to_device(
ptr_device_real, device_data(), count);
device_memory::copy_device_to_device(
ptr_device_imag, device_data_imag(), count);
}
  /// Copy data from host memory into caller-supplied device pointers.
  void copy_out_host_to_device(
    Element * ptr_device_real,           ///< destination device memory (real part)
    Element * ptr_device_imag,           ///< destination device memory (imaginary part)
LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_to_device(
ptr_device_real, host_data(), count);
device_memory::copy_to_device(
ptr_device_imag, host_data_imag(), count);
}
  /// Copy data from host memory into caller-supplied host pointers.
  void copy_out_host_to_host(
    Element * ptr_host_real,             ///< destination host memory (real part)
    Element * ptr_host_imag,             ///< destination host memory (imaginary part)
LongIndex count = -1) const { ///< number of elements to transfer; if negative, entire tensor is overwritten.
if (count < 0) {
count = capacity();
}
else {
count = __NV_STD_MIN(capacity(), count);
}
device_memory::copy_host_to_host(
ptr_host_real, host_data(), count);
device_memory::copy_host_to_host(
ptr_host_imag, host_data_imag(), count);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
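///////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative usage sketch (not part of the original header): fills the real and imaginary
// planes of a small planar-complex tensor on the host and mirrors both planes to the device.
// Element type, layout, and extents are hypothetical; cutlass/layout/matrix.h is assumed to be
// included by the caller.
inline void example_host_tensor_planar_complex_usage() {
  cutlass::HostTensorPlanarComplex<float, cutlass::layout::RowMajor> tensor({4, 4});
  float *h_real = tensor.host_data();         // real plane
  float *h_imag = tensor.host_data_imag();    // imaginary plane, offset by imaginary_stride()
  for (size_t i = 0; i < tensor.size(); ++i) {
    h_real[i] = float(i);
    h_imag[i] = -float(i);
  }
  tensor.sync_device();                       // copies both planes: host -> device
  float *d_real = tensor.device_data();       // device-side real plane
  float *d_imag = tensor.device_data_imag();  // device-side imaginary plane
  (void)d_real; (void)d_imag;
}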
| 20,354 | C | 33.383446 | 123 | 0.656431 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/device_memory.h | /******************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#pragma once
/**
* \file
* \brief C++ interface to CUDA device memory management functions.
*/
#include <memory>
#include "cutlass/platform/platform.h"
#include "cutlass/numeric_types.h"
#include "exceptions.h"
namespace cutlass {
namespace device_memory {
/******************************************************************************
* Allocation lifetime
******************************************************************************/
/// Allocate a buffer of \p count elements of type \p T on the current CUDA device
template <typename T>
T* allocate(size_t count = 1) {
T* ptr = 0;
size_t bytes = 0;
bytes = count * sizeof(T);
cudaError_t cuda_error = cudaMalloc((void**)&ptr, bytes);
if (cuda_error != cudaSuccess) {
throw cuda_exception("Failed to allocate memory", cuda_error);
}
return ptr;
}
/// Free the buffer pointed to by \p ptr
template <typename T>
void free(T* ptr) {
if (ptr) {
cudaError_t cuda_error = (cudaFree(ptr));
if (cuda_error != cudaSuccess) {
throw cuda_exception("Failed to free device memory", cuda_error);
}
}
}
/******************************************************************************
* Data movement
******************************************************************************/
template <typename T>
void copy(T* dst, T const* src, size_t count, cudaMemcpyKind kind) {
size_t bytes = count * sizeof_bits<T>::value / 8;
if (bytes == 0 && count > 0)
bytes = 1;
cudaError_t cuda_error = (cudaMemcpy(dst, src, bytes, kind));
if (cuda_error != cudaSuccess) {
throw cuda_exception("cudaMemcpy() failed", cuda_error);
}
}
template <typename T>
void copy_to_device(T* dst, T const* src, size_t count = 1) {
copy(dst, src, count, cudaMemcpyHostToDevice);
}
template <typename T>
void copy_to_host(T* dst, T const* src, size_t count = 1) {
copy(dst, src, count, cudaMemcpyDeviceToHost);
}
template <typename T>
void copy_device_to_device(T* dst, T const* src, size_t count = 1) {
copy(dst, src, count, cudaMemcpyDeviceToDevice);
}
template <typename T>
void copy_host_to_host(T* dst, T const* src, size_t count = 1) {
copy(dst, src, count, cudaMemcpyHostToHost);
}
/// Copies elements from device memory to host-side range
template <typename OutputIterator, typename T>
void insert_to_host(OutputIterator begin, OutputIterator end, T const* device_begin) {
size_t elements = end - begin;
copy_to_host(&*begin, device_begin, elements);
}
/// Copies elements to device memory from host-side range
template <typename T, typename InputIterator>
void insert_to_device(T* device_begin, InputIterator begin, InputIterator end) {
size_t elements = end - begin;
copy_to_device(device_begin, &*begin, elements);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device_memory
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
class DeviceAllocation {
public:
/// Delete functor for CUDA device memory
struct deleter {
void operator()(T* ptr) {
cudaError_t cuda_error = (cudaFree(ptr));
if (cuda_error != cudaSuccess) {
// noexcept
// throw cuda_exception("cudaFree() failed", cuda_error);
return;
}
}
};
public:
//
// Data members
//
/// Number of elements of T allocated on the current CUDA device
size_t capacity;
/// Smart pointer
platform::unique_ptr<T, deleter> smart_ptr;
public:
//
// Static methods
//
/// Static member to compute the number of bytes needed for a given number of elements
static size_t bytes(size_t elements) {
if (sizeof_bits<T>::value < 8) {
size_t const kElementsPerByte = 8 / sizeof_bits<T>::value;
return elements / kElementsPerByte;
}
else {
size_t const kBytesPerElement = sizeof_bits<T>::value / 8;
return elements * kBytesPerElement;
}
}
public:
//
// Methods
//
/// Constructor: allocates no memory
DeviceAllocation() : capacity(0) {}
/// Constructor: allocates \p capacity elements on the current CUDA device
DeviceAllocation(size_t _capacity) :
smart_ptr(device_memory::allocate<T>(_capacity)), capacity(_capacity) {}
  /// Constructor: takes ownership of an existing device allocation of \p _capacity elements; no new memory is allocated
DeviceAllocation(T *ptr, size_t _capacity) : smart_ptr(ptr), capacity(_capacity) {}
/// Copy constructor
DeviceAllocation(DeviceAllocation const &p):
smart_ptr(device_memory::allocate<T>(p.capacity)), capacity(p.capacity) {
device_memory::copy_device_to_device(smart_ptr.get(), p.get(), capacity);
}
/// Move constructor
DeviceAllocation(DeviceAllocation &&p): capacity(0) {
std::swap(smart_ptr, p.smart_ptr);
std::swap(capacity, p.capacity);
}
/// Destructor
~DeviceAllocation() { reset(); }
/// Returns a pointer to the managed object
T* get() const { return smart_ptr.get(); }
/// Releases the ownership of the managed object (without deleting) and resets capacity to zero
T* release() {
capacity = 0;
return smart_ptr.release();
}
/// Deletes the managed object and resets capacity to zero
void reset() {
capacity = 0;
smart_ptr.reset();
}
/// Deletes managed object, if owned, and allocates a new object
void reset(size_t _capacity) {
reset(device_memory::allocate<T>(_capacity), _capacity);
}
/// Deletes managed object, if owned, and replaces its reference with a given pointer and capacity
void reset(T* _ptr, size_t _capacity) {
smart_ptr.reset(_ptr);
capacity = _capacity;
}
/// Allocates a new buffer and copies the old buffer into it. The old buffer is then released.
void reallocate(size_t new_capacity) {
platform::unique_ptr<T, deleter> new_allocation(device_memory::allocate<T>(new_capacity));
device_memory::copy_device_to_device(
new_allocation.get(),
smart_ptr.get(),
std::min(new_capacity, capacity));
std::swap(smart_ptr, new_allocation);
std::swap(new_capacity, capacity);
}
/// Returns the number of elements
size_t size() const {
return capacity;
}
/// Returns the number of bytes needed to store the allocation
size_t bytes() const {
return bytes(capacity);
}
/// Returns a pointer to the object owned by *this
T* operator->() const { return smart_ptr.get(); }
/// Returns the deleter object which would be used for destruction of the managed object.
deleter& get_deleter() { return smart_ptr.get_deleter(); }
/// Returns the deleter object which would be used for destruction of the managed object (const)
const deleter& get_deleter() const { return smart_ptr.get_deleter(); }
/// Copies a device-side memory allocation
DeviceAllocation & operator=(DeviceAllocation const &p) {
if (capacity != p.capacity) {
smart_ptr.reset(device_memory::allocate<T>(p.capacity));
capacity = p.capacity;
}
device_memory::copy_device_to_device(smart_ptr.get(), p.get(), capacity);
return *this;
}
/// Move assignment
DeviceAllocation & operator=(DeviceAllocation && p) {
std::swap(smart_ptr, p.smart_ptr);
std::swap(capacity, p.capacity);
return *this;
}
/// Copies the entire allocation from another location in device memory.
void copy_from_device(T const *ptr) const {
copy_from_device(ptr, capacity);
}
/// Copies a given number of elements from device memory
void copy_from_device(T const *ptr, size_t elements) const {
device_memory::copy_device_to_device(get(), ptr, elements);
}
  /// Copies the entire allocation to another location in device memory.
  void copy_to_device(T *ptr) const {
    copy_to_device(ptr, capacity);
  }
  /// Copies a given number of elements to another location in device memory
  void copy_to_device(T *ptr, size_t elements) const {
    device_memory::copy_device_to_device(ptr, get(), elements);
  }
  /// Copies the entire allocation from host memory
  void copy_from_host(T const *ptr) const {
    copy_from_host(ptr, capacity);
  }
  /// Copies a given number of elements from host memory
  void copy_from_host(T const *ptr, size_t elements) const {
    device_memory::copy_to_device(get(), ptr, elements);
  }
  /// Copies the entire allocation to host memory
  void copy_to_host(T *ptr) const {
    copy_to_host(ptr, capacity);
  }
  /// Copies a given number of elements to host memory
  void copy_to_host(T *ptr, size_t elements) const {
    device_memory::copy_to_host(ptr, get(), elements);
  }
};
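// Usage sketch (editor's illustrative note, not part of CUTLASS): DeviceAllocation owns its
// device memory (RAII), so no explicit free is needed. The element counts below are
// hypothetical example values.
//
//   cutlass::DeviceAllocation<float> buffer(4096);    // allocates 4096 floats on the device
//   std::vector<float> host(buffer.size(), 2.0f);
//   buffer.copy_from_host(host.data());               // host -> device, buffer.size() elements
//   buffer.reallocate(8192);                          // grows, preserving the first 4096 elements
//   buffer.copy_to_host(host.data(), host.size());    // device -> host for the original extent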
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace device_memory {
/// Device allocation abstraction that tracks size and capacity
template <typename T>
using allocation = cutlass::DeviceAllocation<T>;
} // namespace device_memory
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 10,561 | C | 30.156342 | 111 | 0.624751 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/host_uncompress.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief uncompress sparse matrix from the host side
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/tensor_view.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/host/gemm.h"
namespace cutlass {
// uncompress sparse tensor core A matrix
template <typename ElementA, typename LayoutA, typename ElementE,
typename LayoutE>
void uncompress(TensorRef<ElementA, LayoutA> uncompressed_tensor_a,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementE, LayoutE> tensor_e, int row, int col) {
  // How many uncompressed elements can be recovered from one ElementE of metadata
int DecompressedElementsPerElementE =
256 / cutlass::sizeof_bits<ElementA>::value;
  // Process 4 bits of metadata at a time
  int step;
  // Sparsity ratio a:b (1:2, 2:4, or 4:8), depending on the element width
  int a, b;
if (cutlass::sizeof_bits<ElementA>::value == 4) {
step = 8;
a = 4;
b = 8;
} else if (cutlass::sizeof_bits<ElementA>::value == 8) {
step = 4;
a = 2;
b = 4;
} else if (cutlass::sizeof_bits<ElementA>::value == 16) {
step = 4;
a = 2;
b = 4;
} else if (cutlass::sizeof_bits<ElementA>::value == 32) {
step = 2;
a = 1;
b = 2;
}
int ElementsPerE = (cutlass::sizeof_bits<ElementA>::value == 4) ? 2 : 1;
for (int r = 0; r < row; ++r) {
for (int c = 0; c < (col / DecompressedElementsPerElementE); ++c) {
ElementE meta = tensor_e.at(MatrixCoord(r, c));
for (int i = 0; i < DecompressedElementsPerElementE; i += step) {
int e = (meta >> (i / step * 4)) & 0xf;
int idx0 = e & 0x3;
int idx1 = e >> 2;
if (a == 1) idx0 = idx0 / 2;
for (int ii = 0; ii < step; ii += ElementsPerE) {
int real_col =
c * DecompressedElementsPerElementE + i + ii;
int compressed_col = (real_col / b) * a;
if (ii == (idx0 * ElementsPerE)) {
uncompressed_tensor_a.at(MatrixCoord(r, real_col)) =
tensor_a.at(MatrixCoord(r, compressed_col));
if (ElementsPerE == 2)
uncompressed_tensor_a.at(MatrixCoord(r, real_col + 1)) =
tensor_a.at(MatrixCoord(r, compressed_col + 1));
} else if ((ii == (idx1 * ElementsPerE)) && (a != 1)) {
uncompressed_tensor_a.at(MatrixCoord(r, real_col)) =
tensor_a.at(MatrixCoord(r, compressed_col + ElementsPerE));
if (ElementsPerE == 2)
uncompressed_tensor_a.at(MatrixCoord(r, real_col + 1)) =
tensor_a.at(
MatrixCoord(r, compressed_col + ElementsPerE + 1));
} else {
uncompressed_tensor_a.at(MatrixCoord(r, real_col)) =
ElementA(0);
if (ElementsPerE == 2)
uncompressed_tensor_a.at(MatrixCoord(r, real_col + 1)) =
ElementA(0);
}
}
}
}
}
}
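// Usage sketch (editor's illustrative note, not part of CUTLASS): expands a 2:4-compressed
// operand back to its dense m x k shape for host-side verification. The tensor names, the
// row-major layouts, and the uint16_t metadata type are assumptions made for illustration.
//
//   int m = 128, k = 256;
//   cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor> a_compressed({m, k / 2});
//   cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor> a_dense({m, k});
//   cutlass::HostTensor<uint16_t, cutlass::layout::RowMajor> meta({m, k / 16});
//   // ... fill a_compressed and meta (e.g., from a sparse GEMM test) ...
//   cutlass::uncompress(a_dense.host_ref(), a_compressed.host_ref(), meta.host_ref(), m, k);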
// uncompress ELL block sparse matrix
template <typename ElementA, typename LayoutA,
typename ElementE, typename LayoutE>
void uncompress_ell_block_sparse(
TensorRef<ElementA, LayoutA> uncompressed_tensor_a,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementE, LayoutE> ell_idx,
int rows, int cols,
int ell_num_cols, int ell_blocksize) {
for (int r = 0; r < rows / ell_blocksize; ++r) {
for (int c = 0; c < ell_num_cols / ell_blocksize; ++c) {
ElementE idx = ell_idx.at(MatrixCoord(r, c));
if (idx != -1) {
int row_begin = r * ell_blocksize;
int col_begin_real = idx * ell_blocksize;
int col_begin = c * ell_blocksize;
for (int i = 0; i < ell_blocksize; ++i) {
for (int j = 0; j < ell_blocksize; ++j) {
uncompressed_tensor_a.at(MatrixCoord(row_begin + i, col_begin_real + j)) =
tensor_a.at(
MatrixCoord(row_begin + i, col_begin +j));
}
}
}
}
}
}
} // namespace cutlass
| 5,890 | C | 36.28481 | 100 | 0.595416 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/type_traits.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Type traits for common CUDA types
*/
#pragma once
#include <cublas_v2.h>
#include <cuda_fp16.h>
#include <stdint.h>
#include "cutlass/numeric_types.h"
#include "cutlass/complex.h"
namespace cutlass {
struct half_t;
template <typename T>
struct TypeTraits {
typedef T host_type;
typedef T device_type;
static inline T remove_negative_zero(T x) { return x; }
static inline T to_print(T x) { return x; }
static inline device_type to_device(host_type x) { return x; }
};
template <>
struct TypeTraits<int8_t> {
static cudaDataType_t const cublas_type = CUDA_R_8I;
typedef int8_t host_type;
typedef int8_t device_type;
typedef int8_t integer_type;
typedef uint8_t unsigned_type;
static inline int8_t remove_negative_zero(int8_t x) { return x; }
static inline int to_print(int8_t x) { return (int)x; }
static inline device_type to_device(host_type x) { return x; }
};
template <>
struct TypeTraits<uint8_t> {
static cudaDataType_t const cublas_type = CUDA_R_8I;
typedef uint8_t host_type;
typedef uint8_t device_type;
typedef uint8_t integer_type;
typedef uint8_t unsigned_type;
static inline uint8_t remove_negative_zero(uint8_t x) { return x; }
static inline uint32_t to_print(uint8_t x) { return (uint32_t)x; }
static inline device_type to_device(host_type x) { return x; }
};
template <>
struct TypeTraits<int> {
static cudaDataType_t const cublas_type = CUDA_R_32I;
typedef int host_type;
typedef int device_type;
typedef int32_t integer_type;
typedef uint32_t unsigned_type;
static inline int32_t remove_negative_zero(int32_t x) { return x; }
static inline int to_print(int x) { return x; }
static inline device_type to_device(host_type x) { return x; }
};
template <>
struct TypeTraits<unsigned> {
static cudaDataType_t const cublas_type = CUDA_R_32I;
typedef unsigned host_type;
typedef unsigned device_type;
typedef uint32_t integer_type;
typedef uint32_t unsigned_type;
static inline uint32_t remove_negative_zero(uint32_t x) { return x; }
static inline uint32_t to_print(uint32_t x) { return x; }
static inline device_type to_device(host_type x) { return x; }
};
template <>
struct TypeTraits<int64_t> {
static cudaDataType_t const cublas_type = CUDA_R_8I;
typedef int64_t host_type;
typedef int64_t device_type;
typedef int64_t integer_type;
typedef uint64_t unsigned_type;
static inline int64_t remove_negative_zero(int64_t x) { return x; }
static inline int64_t to_print(int64_t x) { return x; }
static inline device_type to_device(host_type x) { return x; }
};
template <>
struct TypeTraits<uint64_t> {
static cudaDataType_t const cublas_type = CUDA_R_8I;
typedef uint64_t host_type;
typedef uint64_t device_type;
typedef uint64_t integer_type;
typedef uint64_t unsigned_type;
static inline uint64_t remove_negative_zero(uint64_t x) { return x; }
static inline uint64_t to_print(uint64_t x) { return x; }
static inline device_type to_device(host_type x) { return x; }
};
template <>
struct TypeTraits<half_t> {
static cudaDataType_t const cublas_type = CUDA_R_16F;
typedef half_t host_type;
typedef half_t device_type;
typedef int16_t integer_type;
typedef uint16_t unsigned_type;
static inline half_t remove_negative_zero(half_t x) {
return (x.raw() == 0x8000 ? half_t::bitcast(0) : x);
}
static inline half_t to_print(half_t x) { return x; }
static inline device_type to_device(half_t x) { return reinterpret_cast<device_type const &>(x); }
};
template <>
struct TypeTraits<float> {
static cudaDataType_t const cublas_type = CUDA_R_32F;
typedef float host_type;
typedef float device_type;
typedef int32_t integer_type;
typedef uint32_t unsigned_type;
static inline float remove_negative_zero(float x) { return x == -0.f ? 0.f : x; }
static inline float to_print(float x) { return x; }
static inline device_type to_device(host_type x) { return x; }
};
template <>
struct TypeTraits<double> {
static cudaDataType_t const cublas_type = CUDA_R_64F;
typedef double host_type;
typedef double device_type;
typedef int64_t integer_type;
typedef uint64_t unsigned_type;
static inline double remove_negative_zero(double x) { return x == -0.0 ? 0.0 : x; }
static inline double to_print(double x) { return x; }
static inline device_type to_device(host_type x) { return x; }
};
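// Usage sketch (editor's illustrative note, not part of CUTLASS): TypeTraits is typically used
// to canonicalize values before bitwise comparison and to select the matching cuBLAS enumerant.
// The literal below is a hypothetical example value.
//
//   using Traits = cutlass::TypeTraits<float>;
//   float x = Traits::remove_negative_zero(-0.0f);   // yields +0.0f so bit patterns compare equal
//   cudaDataType_t type = Traits::cublas_type;       // CUDA_R_32F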
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Complex types
//
///////////////////////////////////////////////////////////////////////////////////////////////////
template <>
struct TypeTraits<complex<half> > {
static cudaDataType_t const cublas_type = CUDA_C_16F;
typedef complex<half_t> host_type;
typedef complex<half> device_type;
typedef int16_t integer_type;
typedef uint16_t unsigned_type;
static inline device_type to_device(complex<half> x) { return reinterpret_cast<device_type const &>(x); }
};
template <>
struct TypeTraits<complex<half_t> > {
static cudaDataType_t const cublas_type = CUDA_C_16F;
typedef complex<half_t> host_type;
typedef complex<half> device_type;
typedef int16_t integer_type;
typedef uint16_t unsigned_type;
static inline complex<half_t> remove_negative_zero(complex<half_t> x) {
return complex<half_t>(
real(x) == -0_hf ? 0_hf : real(x),
imag(x) == -0_hf ? 0_hf : imag(x)
);
}
static inline complex<half_t> to_print(complex<half_t> x) { return x; }
static inline device_type to_device(complex<half_t> x) { return reinterpret_cast<device_type const &>(x); }
};
template <>
struct TypeTraits<complex<float> > {
static cudaDataType_t const cublas_type = CUDA_C_32F;
typedef complex<float> host_type;
typedef complex<float> device_type;
typedef int64_t integer_type;
typedef uint64_t unsigned_type;
static inline complex<float> remove_negative_zero(complex<float> x) {
return complex<float>(
real(x) == -0.f ? 0.f : real(x),
imag(x) == -0.f ? 0.f : imag(x)
);
}
static inline complex<float> to_print(complex<float> x) { return x; }
static inline device_type to_device(complex<float> x) { return reinterpret_cast<device_type const &>(x); }
};
template <>
struct TypeTraits<complex<double> > {
static cudaDataType_t const cublas_type = CUDA_C_64F;
typedef complex<double> host_type;
typedef complex<double> device_type;
struct integer_type { int64_t real, imag; };
struct unsigned_type { uint64_t real, imag; };
static inline complex<double> remove_negative_zero(complex<double> x) {
return complex<double>(
real(x) == -0.0 ? 0.0 : real(x),
imag(x) == -0.0 ? 0.0 : imag(x)
);
}
static inline complex<double> to_print(complex<double> x) { return x; }
static inline device_type to_device(complex<double> x) { return reinterpret_cast<device_type const &>(x); }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| 8,809 | C | 35.861925 | 109 | 0.673289 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/detail/linear_to_coordinate.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Helpers for converting a linear index into a multi-dimensional coordinate, used by host-side reference implementations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reference {
namespace detail {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <int Rank, int Index>
struct LinearToCoordinateHelper {
CUTLASS_HOST_DEVICE
void operator()(Coord<Rank> &coord, int64_t idx, Coord<Rank> const &extent) const {
int64_t prod = 1;
CUTLASS_PRAGMA_UNROLL
for (int i = Rank - Index; i < Rank; ++i) {
prod *= int64_t(extent[i]);
}
coord[Rank - Index - 1] = int(idx / prod);
int64_t residual = idx % prod;
LinearToCoordinateHelper<Rank, Index - 1>()(coord, residual, extent);
}
};
template <int Rank>
struct LinearToCoordinateHelper<Rank, 0> {
CUTLASS_HOST_DEVICE
void operator()(Coord<Rank> &coord, int64_t idx, Coord<Rank> const &extent) const {
coord[Rank - 1] = int(idx);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <int Rank>
struct LinearToCoordinate {
CUTLASS_HOST_DEVICE
void operator()(Coord<Rank> &coord, int64_t idx, Coord<Rank> const &extent) const {
LinearToCoordinateHelper<Rank, Rank - 1>()(coord, idx, extent);
}
};
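// Usage sketch (editor's illustrative note, not part of CUTLASS): unflattens a linear index
// into a rank-3 coordinate with the last rank varying fastest. The extent and index values are
// hypothetical.
//
//   cutlass::Coord<3> extent = cutlass::make_Coord(4, 5, 6);
//   cutlass::Coord<3> coord;
//   cutlass::reference::detail::LinearToCoordinate<3>()(coord, 17, extent);
//   // coord == (0, 2, 5), since (0 * 5 + 2) * 6 + 5 == 17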
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace detail
} // namespace reference
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 3,527 | C | 36.136842 | 100 | 0.586901 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/detail/inner_product.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
  \brief Inner-product and cast helpers used by the host-side reference GEMM implementations.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
namespace cutlass {
namespace reference {
namespace detail {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Template function to compute an inner product.
#pragma hd_warning_disable // Suppresses warnings when attempting to instantiate with a
// host-only type
template <typename Atype, typename Btype, typename Ctype>
CUTLASS_HOST_DEVICE
Ctype inner_product(Atype a, Btype b, Ctype c) {
return Ctype(a) * Ctype(b) + c;
}
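// Usage sketch (editor's illustrative note, not part of CUTLASS): accumulates one multiply-add
// step in a wider type than the operands. The values are hypothetical.
//
//   float acc = 0.0f;
//   acc = cutlass::reference::detail::inner_product(
//       cutlass::half_t(1.5f), cutlass::half_t(2.0f), acc);   // acc == 3.0f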
/// Specialization for matrix multiplication with binary operands
template <>
CUTLASS_HOST_DEVICE
int inner_product<Array<bin1_t, 32>, Array<bin1_t, 32>, int>(
Array<bin1_t, 32> a,
Array<bin1_t, 32> b,
int c) {
int accum = 0;
for (int bit = 0; bit < 32; bit++) {
accum += a[bit] ^ b[bit];
}
return accum + c;
}
/*
/// Specialization for matrix multiplication with signed 4-bit integer operands
template <>
CUTLASS_HOST_DEVICE
int inner_product<Array<int4b_t, 8>, Array<int4b_t, 8>, int>(
Array<int4b_t, 8> a,
Array<int4b_t, 8> b,
int c) {
int accum = 0;
for (int k = 0; k < 8; k++) {
accum += a[k] * b[k];
}
return accum + c;
}
/// Specialization for matrix multiplication with unsigned 4-bit integer operands
template <>
CUTLASS_HOST_DEVICE
int inner_product<Array<uint4b_t, 8>, Array<uint4b_t, 8>, int>(
Array<uint4b_t, 8> a,
Array<uint4b_t, 8> b,
int c) {
int accum = 0;
for (int k = 0; k < 8; k++) {
accum += a[k] * b[k];
}
return accum + c;
}
*/
////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename SrcType, typename DstType>
struct Cast {
// Default behavior: convert to the destination type
#pragma hd_warning_disable // Suppresses warnings when attempting to instantiate complex<T> with a
// host-only type
CUTLASS_HOST_DEVICE
static DstType apply(SrcType src) { return static_cast<DstType>(src); };
};
template <>
struct Cast<float, int8_t> {
CUTLASS_HOST_DEVICE
static int8_t apply(float src) {
// Clamp to the range of signed 8-bit integers.
return static_cast<int8_t>(fmaxf(-128.f, fminf(127.f, src)));
};
};
template <>
struct Cast<float, uint8_t> {
CUTLASS_HOST_DEVICE
static uint8_t apply(float src) {
    // Clamp to the range of unsigned 8-bit integers.
return static_cast<uint8_t>(fmaxf(0.f, fminf(255.f, src)));
};
};
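// Usage sketch (editor's illustrative note, not part of CUTLASS): the Cast specializations
// above saturate instead of wrapping. The input values are hypothetical.
//
//   int8_t  s = cutlass::reference::detail::Cast<float, int8_t>::apply(300.0f);   // s == 127
//   uint8_t u = cutlass::reference::detail::Cast<float, uint8_t>::apply(-3.0f);   // u == 0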
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace detail
} // namespace reference
} // namespace cutlass
| 4,606 | C | 32.875 | 100 | 0.623317 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/host/rank_2k.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for Rank 2k update in host-side code.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/arch/mma.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
namespace cutlass {
namespace reference {
namespace host {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
FillMode FillModeC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = multiply_add<ComputeType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_rank2k(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum) {
static_assert(
LayoutA::kRank == 2 &&
LayoutB::kRank == 2 &&
LayoutC::kRank == 2,
"Tensors must be of rank 2");
static_assert(
FillModeC == FillMode::kLower ||
FillModeC == FillMode::kUpper,
"Fill Mode can either be Lower or Upper.");
using CompareOp = typename platform::conditional<(FillModeC == FillMode::kLower),
std::greater_equal<int>,
std::less_equal<int>>::type;
// Note: batch is ignored.
  // Note: M is the same as N for a rank-2k update
int const N = problem_size.n();
int const K = problem_size.k();
  // Blocking is necessary to speed up the reference implementation
int const Nblock = 16;
ConvertOp convert_op;
InnerProductOp inner_product_op;
CompareOp compare_op;
for (int row_block = 0; row_block < N; row_block += Nblock) {
for (int col_block = 0; col_block < N; col_block += Nblock) {
ComputeType accum[Nblock][Nblock];
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Nblock; i++) {
accum[i][j] = initial_accum;
}
}
for (int k_block = 0; k_block < K; ++k_block) {
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Nblock; i++) {
int row = row_block + i;
int col = col_block + j;
if (row < N && col < N && compare_op(row, col))
{
// A x B^T
ElementA a = tensor_a.at(MatrixCoord(row, k_block));
ElementB b_t = tensor_b.at(MatrixCoord(col, k_block));
ComputeType compute_a(cast_if_scalar<ComputeType>(a));
ComputeType compute_b_t(cast_if_scalar<ComputeType>(b_t));
accum[i][j] = inner_product_op(compute_a, compute_b_t, accum[i][j]);
// B x A^T
ElementB b = tensor_b.at(MatrixCoord(row, k_block));
ElementA a_t = tensor_a.at(MatrixCoord(col, k_block));
ComputeType compute_b(cast_if_scalar<ComputeType>(b));
ComputeType compute_a_t(cast_if_scalar<ComputeType>(a_t));
accum[i][j] = inner_product_op(compute_b, compute_a_t, accum[i][j]);
}
}
}
}
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Nblock; i++) {
int row = row_block + i;
int col = col_block + j;
MatrixCoord coord = MatrixCoord(row, col);
if (row < N && col < N &&
( (FillModeC == FillMode::kLower && row >= col) ||
(FillModeC == FillMode::kUpper && row <= col) )
) {
tensor_d.at(coord) = convert_op(
alpha * ScalarType(accum[i][j]) +
beta * ScalarType(tensor_c.at(coord)));
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general Rank 2k update (tensors of rank=2) pointed to by TensorRef
/// objects.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
FillMode FillModeC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = multiply_add<ComputeType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_rank2k(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum) {
compute_rank2k<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, FillModeC,
ScalarType, ComputeType, InnerProductOp, ConvertOp>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_c,
initial_accum);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
FillMode FillModeC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = cutlass::arch::OpMultiplyAdd
>
struct Rank2K;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add
template <typename ElementA, typename LayoutA,
typename ElementB, typename LayoutB,
typename ElementC, typename LayoutC, FillMode FillModeC,
typename ScalarType, typename ComputeType>
struct Rank2K<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, FillModeC, ScalarType,
ComputeType, arch::OpMultiplyAdd> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_rank2k<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, FillModeC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_rank2k<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, FillModeC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
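// Usage sketch (editor's illustrative note, not part of CUTLASS): computes a lower-triangular
// rank-2k update D = alpha * (A * B^T + B * A^T) + beta * C on the host. Tensor construction is
// elided; the element types, layouts, and variable names are assumptions made for illustration.
//
//   using Rank2KRef = cutlass::reference::host::Rank2K<
//       float, cutlass::layout::ColumnMajor,    // A
//       float, cutlass::layout::ColumnMajor,    // B
//       float, cutlass::layout::ColumnMajor,    // C / D
//       cutlass::FillMode::kLower, float, float>;
//   Rank2KRef()({n, n, k}, alpha, tensor_a.host_ref(), tensor_b.host_ref(),
//               beta, tensor_c.host_ref(), tensor_d.host_ref());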
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
| 9,441 | C | 35.038168 | 100 | 0.598559 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/host/error_metrics.h |
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cmath>
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/util/reference/host/tensor_reduce.h"
#include "cutlass/core_io.h"
namespace cutlass {
namespace reference {
namespace host {
/// Helper to compute the relative error metric of tensor A_computed w.r.t. tensor B_reference
template <
typename Element,
typename Layout,
typename ComputeType = double
>
ComputeType TensorRelativeErrorMetric(
TensorView<Element, Layout> view_A_computed,
TensorView<Element, Layout> view_B_reference,
ComputeType identity = ComputeType()
) {
return cutlass::reference::host::TensorNormDiff(view_A_computed, view_B_reference, identity) /
cutlass::reference::host::TensorNorm(view_B_reference, identity);
}
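// Usage sketch (editor's illustrative note, not part of CUTLASS): compares a computed tensor
// against a reference and accepts it when the relative error falls below a tolerance; the
// tensor names and the threshold are hypothetical.
//
//   double rel = cutlass::reference::host::TensorRelativeErrorMetric(
//       tensor_computed.host_view(), tensor_reference.host_view());
//   bool passed = (rel < 1e-5);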
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
| 2,766 | C | 40.298507 | 100 | 0.68402 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/host/gemm_planar_complex.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for complex-valued GEMM in host-side code.
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/complex.h"
#include "cutlass/numeric_types.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_ref_planar_complex.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
namespace cutlass {
namespace reference {
namespace host {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// Explicitly naming types needed by this template can be cumbersome, particularly for the
/// accumulator type, so a function argument 'initial_accum' is exposed. Passing
/// AccumulatorType(0) as the last function argument can be easier than naming all template
/// arguments explicitly.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename ConvertOp = NumericConverter<ElementC, ScalarType>,
typename InnerProductOp = multiply_add<complex<ComputeType>>
>
void GemmPlanarComplex(
gemm::GemmCoord problem_size,
complex<ScalarType> alpha,
TensorRefPlanarComplex<ElementA, LayoutA> tensor_a,
ComplexTransform transform_a,
TensorRefPlanarComplex<ElementB, LayoutB> tensor_b,
ComplexTransform transform_b,
complex<ScalarType> beta,
TensorRefPlanarComplex<ElementC, LayoutC> tensor_c,
TensorRefPlanarComplex<ElementC, LayoutC> tensor_d,
complex<ComputeType> initial_accum) {
static_assert(
LayoutA::kRank == 2 &&
LayoutB::kRank == 2 &&
LayoutC::kRank == 2, "Tensors must be of rank 2");
using ComplexA = typename TensorRefPlanarComplex<ElementA, LayoutA>::ComplexElement;
using ComplexB = typename TensorRefPlanarComplex<ElementB, LayoutB>::ComplexElement;
using ComplexC = typename TensorRefPlanarComplex<ElementC, LayoutC>::ComplexElement;
// Note: batch is ignored.
int const M = problem_size.m();
int const N = problem_size.n();
int const K = problem_size.k();
  // Blocking is necessary to speed up the reference implementation
int const Mblock = 16;
int const Nblock = 16;
ConvertOp convert_op;
InnerProductOp inner_product_op;
for (int row_block = 0; row_block < M; row_block += Mblock) {
for (int col_block = 0; col_block < N; col_block += Nblock) {
complex<ComputeType> accum[Mblock][Nblock];
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
accum[i][j] = initial_accum;
}
}
for (int k_block = 0; k_block < K; ++k_block) {
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
if (row < M && col < N) {
ComplexA a_ik = tensor_a.at(MatrixCoord(row, k_block));
ComplexB b_kj = tensor_b.at(MatrixCoord(k_block, col));
complex<ComputeType> a = complex<ComputeType>{
ComputeType(a_ik.real()),
ComputeType(a_ik.imag())
};
complex<ComputeType> b = complex<ComputeType>{
ComputeType(b_kj.real()),
ComputeType(b_kj.imag())
};
if (transform_a == ComplexTransform::kConjugate) {
a = conj(a);
}
if (transform_b == ComplexTransform::kConjugate) {
b = conj(b);
}
accum[i][j] = inner_product_op(a, b, accum[i][j]);
}
}
}
}
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
MatrixCoord coord = MatrixCoord(row, col);
if (row < M && col < N) {
complex<ScalarType> acc{
ScalarType(accum[i][j].real()),
ScalarType(accum[i][j].imag())
};
ComplexC d_ij = tensor_c.at(coord);
complex<ScalarType> src{
ScalarType(d_ij.real()),
ScalarType(d_ij.imag())
};
complex<ScalarType> result = alpha * acc + beta * src;
d_ij.real() = convert_op(result.real());
d_ij.imag() = convert_op(result.imag());
tensor_d.at(coord) = d_ij;
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// This assumes the accumulator type is the same type as the scalars.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType
>
void GemmPlanarComplex(
gemm::GemmCoord problem_size,
complex<ScalarType> alpha,
TensorRefPlanarComplex<ElementA, LayoutA> tensor_a,
ComplexTransform transform_a,
TensorRefPlanarComplex<ElementB, LayoutB> tensor_b,
ComplexTransform transform_b,
complex<ScalarType> beta,
TensorRefPlanarComplex<ElementC, LayoutC> tensor_c,
TensorRefPlanarComplex<ElementC, LayoutC> tensor_d) {
GemmPlanarComplex(
problem_size,
alpha,
tensor_a, transform_a,
tensor_b, transform_b,
beta,
tensor_c,
tensor_d,
complex<ScalarType>());
}
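// Usage sketch (editor's illustrative note, not part of CUTLASS): host reference for a
// planar-complex GEMM in which A is conjugated and B is used as-is. Tensor construction is
// elided; the scalar values and names are assumptions made for illustration.
//
//   cutlass::complex<float> alpha(1.0f, 0.0f), beta(0.0f, 0.0f);
//   cutlass::reference::host::GemmPlanarComplex(
//       {m, n, k}, alpha,
//       tensor_a.host_ref(), cutlass::ComplexTransform::kConjugate,
//       tensor_b.host_ref(), cutlass::ComplexTransform::kNone,
//       beta, tensor_c.host_ref(), tensor_d.host_ref());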
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| 7,708 | C | 32.663755 | 100 | 0.605475 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/host/tensor_foreach.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <stdexcept>
#include "cutlass/cutlass.h"
namespace cutlass {
namespace reference {
namespace host {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines several helpers
namespace detail {
/// Helper to perform for-each operation
template <typename Func, int Rank, int RankRemaining>
struct TensorForEachHelper {
/// Index of the active rank
static int const kActiveRank = Rank - RankRemaining - 1;
/// Constructor for general rank
TensorForEachHelper(
Func &func,
Coord<Rank> const &extent,
Coord<Rank> &coord) {
for (int i = 0; i < extent.at(kActiveRank); ++i) {
coord[kActiveRank] = i;
TensorForEachHelper<Func, Rank, RankRemaining - 1>(func, extent, coord);
}
}
};
/// Helper to perform for-each operation
template <typename Func, int Rank>
struct TensorForEachHelper<Func, Rank, 0> {
/// Index of the active rank
static int const kActiveRank = Rank - 1;
  /// Constructor for the fastest-changing rank
TensorForEachHelper(
Func &func,
Coord<Rank> const &extent,
Coord<Rank> &coord) {
for (int i = 0; i < extent.at(kActiveRank); ++i) {
coord[kActiveRank] = i;
func(coord);
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Iterates over the index space of a tensor
template <
typename Func, ///< function applied to each point in a tensor's index space
int Rank> ///< rank of index space
void TensorForEach(Coord<Rank> extent, Func & func) {
Coord<Rank> coord;
detail::TensorForEachHelper<Func, Rank, Rank - 1>(func, extent, coord);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Iterates over the index space of a tensor and calls a C++ lambda
template <
typename Func, ///< function applied to each point in a tensor's index space
int Rank> ///< rank of index space
void TensorForEachLambda(Coord<Rank> extent, Func func) {
Coord<Rank> coord;
detail::TensorForEachHelper<Func, Rank, Rank - 1>(func, extent, coord);
}
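// Usage sketch (editor's illustrative note, not part of CUTLASS): visits every coordinate of a
// 2-D extent and writes a value derived from the coordinate. The host-side `view` is a
// hypothetical TensorView.
//
//   cutlass::Coord<2> extent = cutlass::make_Coord(4, 8);
//   cutlass::reference::host::TensorForEachLambda(extent, [&](cutlass::Coord<2> const &coord) {
//     view.at(coord) = float(coord[0] * 8 + coord[1]);
//   });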
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element, typename Func>
struct BlockForEach {
/// Constructor performs the operation.
BlockForEach(
Element *ptr,
size_t capacity,
typename Func::Params params = typename Func::Params()) {
Func func(params);
for (size_t index = 0; index < capacity; ++index) {
ptr[index] = func();
}
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| 4,756 | C | 34.237037 | 100 | 0.588099 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/host/trmm.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for TRMM in host-side code.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/arch/mma.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
namespace cutlass {
namespace reference {
namespace host {
/// Computes a Triangular Matrix Multiplication (tensors of rank=2) pointed to by TensorRef
/// objects.
template <
typename ElementA,
typename LayoutA,
SideMode SideModeA,
FillMode FillModeA,
DiagType DiagTypeA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = multiply_add<ComputeType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_trmm(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum) {
static_assert(
LayoutA::kRank == 2 &&
LayoutC::kRank == 2, "Tensors must be of rank 2");
static_assert(SideModeA != SideMode::kInvalid
, "Side Mode can either be Left or Right.");
static_assert(FillModeA == FillMode::kLower || FillModeA == FillMode::kUpper
, "Fill Mode can either be Lower or Upper.");
using CompareOp = typename TrMatrixCompareOp<FillModeA, DiagTypeA>::Type;
// Note: batch is ignored.
int const M = problem_size.m();
int const N = problem_size.n();
  // Assumes the caller passes the correct k-dimension (m for SideMode::kLeft, n for SideMode::kRight)
int const K = problem_size.k();
  // Blocking is necessary to speed up the reference implementation
int const Mblock = 16;
int const Nblock = 16;
ConvertOp convert_op;
InnerProductOp inner_product_op;
CompareOp compare_op;
for (int row_block = 0; row_block < M; row_block += Mblock) {
for (int col_block = 0; col_block < N; col_block += Nblock) {
ComputeType accum[Mblock][Nblock];
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
accum[i][j] = initial_accum;
}
}
for (int k_block = 0; k_block < K; ++k_block) {
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
if (row < M && col < N) {
ElementA a = ElementA();
ElementB b = ElementB();
if (SideModeA == SideMode::kLeft) {
a = (compare_op(row, k_block)) ?
(tensor_a.at(MatrixCoord(row, k_block))) : ElementA(0);
if (row == k_block && DiagTypeA == DiagType::kUnit) {
a = ElementA(1);
}
b = tensor_b.at(MatrixCoord(k_block, col));
} else if (SideModeA == SideMode::kRight) {
a = tensor_b.at(MatrixCoord(row, k_block));
b = (compare_op(k_block, col)) ?
tensor_a.at(MatrixCoord(k_block, col)) : ElementA(0);
if (k_block == col && DiagTypeA == DiagType::kUnit) {
b = ElementA(1);
}
}
ComputeType compute_a(cast_if_scalar<ComputeType>(a));
ComputeType compute_b(cast_if_scalar<ComputeType>(b));
accum[i][j] = inner_product_op(compute_a, compute_b, accum[i][j]);
}
}
}
}
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
MatrixCoord coord = MatrixCoord(row, col);
if (row < M && col < N) {
tensor_d.at(coord) = convert_op(
alpha * ScalarType(accum[i][j]));
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
SideMode SideModeA,
FillMode FillModeA,
DiagType DiagTypeA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = cutlass::arch::OpMultiplyAdd
>
struct Trmm;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add
template <typename ElementA, typename LayoutA, SideMode SideModeA,
FillMode FillModeA, DiagType DiagTypeA,
typename ElementB, typename LayoutB,
typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType>
struct Trmm<ElementA, LayoutA, SideModeA, FillModeA, DiagTypeA, ElementB, LayoutB,
ElementC, LayoutC, ScalarType,
ComputeType, arch::OpMultiplyAdd> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_trmm<ElementA, LayoutA, SideModeA, FillModeA, DiagTypeA, ElementB, LayoutB,
ElementC, LayoutC, ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, tensor_d, initial_accum);
}
};
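// Usage sketch (editor's illustrative note, not part of CUTLASS): host reference for a
// left-sided, lower-triangular, non-unit-diagonal TRMM, D = alpha * A * B. Tensor construction
// is elided; the element types, layouts, and variable names are assumptions made for illustration.
//
//   using TrmmRef = cutlass::reference::host::Trmm<
//       float, cutlass::layout::ColumnMajor,
//       cutlass::SideMode::kLeft, cutlass::FillMode::kLower, cutlass::DiagType::kNonUnit,
//       float, cutlass::layout::ColumnMajor,
//       float, cutlass::layout::ColumnMajor,
//       float, float>;
//   TrmmRef()({m, n, m}, alpha, tensor_a.host_ref(), tensor_b.host_ref(), tensor_d.host_ref());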
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
| 7,670 | C | 34.513889 | 100 | 0.594654 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/host/tensor_compare.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines host-side elementwise operations on TensorView.
*/
#pragma once
// Standard Library includes
#include <utility>
// Cutlass includes
#include "cutlass/cutlass.h"
#include "cutlass/tensor_view.h"
#include "cutlass/tensor_view_planar_complex.h"
#include "cutlass/util/distribution.h"
//#include "cutlass/util/type_traits.h"
#include "tensor_foreach.h"
namespace cutlass {
namespace reference {
namespace host {
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorEqualsFunc {
//
// Data members
//
TensorView<Element, Layout> lhs;
TensorView<Element, Layout> rhs;
bool result;
/// Ctor
TensorEqualsFunc(): result(true) { }
/// Ctor
TensorEqualsFunc(
TensorView<Element, Layout> const &lhs_,
TensorView<Element, Layout> const &rhs_
) :
lhs(lhs_), rhs(rhs_), result(true) { }
/// Visits a coordinate
void operator()(Coord<Layout::kRank> const &coord) {
Element lhs_ = lhs.at(coord);
Element rhs_ = rhs.at(coord);
if (lhs_ != rhs_) {
result = false;
}
}
/// Returns true if equal
operator bool() const {
return result;
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns true if two tensor views are equal.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
bool TensorEquals(
TensorView<Element, Layout> const &lhs,
TensorView<Element, Layout> const &rhs) {
// Extents must be identical
if (lhs.extent() != rhs.extent()) {
return false;
}
detail::TensorEqualsFunc<Element, Layout> func(lhs, rhs);
TensorForEach(
lhs.extent(),
func
);
return bool(func);
}
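//
// Illustrative usage sketch (editor's addition, not part of the upstream header). Assumes
// "cutlass/util/host_tensor.h" and the companion reference/host/tensor_fill.h are included;
// names, extents, and values are arbitrary.
//
//   cutlass::HostTensor<float, cutlass::layout::ColumnMajor> lhs(cutlass::MatrixCoord(4, 8));
//   cutlass::HostTensor<float, cutlass::layout::ColumnMajor> rhs(cutlass::MatrixCoord(4, 8));
//   cutlass::reference::host::TensorFill(lhs.host_view(), 1.0f);
//   cutlass::reference::host::TensorFill(rhs.host_view(), 1.0f);
//   bool same = cutlass::reference::host::TensorEquals(lhs.host_view(), rhs.host_view());
//   // same == true here; mismatched extents or any differing element would yield false.
//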
/// Returns true if two tensor views are equal.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
bool TensorEquals(
TensorViewPlanarComplex<Element, Layout> const &lhs,
TensorViewPlanarComplex<Element, Layout> const &rhs) {
// Extents must be identical
if (lhs.extent() != rhs.extent()) {
return false;
}
detail::TensorEqualsFunc<Element, Layout> real_func(
{lhs.data(), lhs.layout(), lhs.extent()},
{rhs.data(), rhs.layout(), rhs.extent()}
);
TensorForEach(
lhs.extent(),
real_func
);
if (!bool(real_func)) {
return false;
}
detail::TensorEqualsFunc<Element, Layout> imag_func(
{lhs.data() + lhs.imaginary_stride(), lhs.layout(), lhs.extent()},
{rhs.data() + rhs.imaginary_stride(), rhs.layout(), rhs.extent()}
);
TensorForEach(
lhs.extent(),
imag_func
);
return bool(imag_func);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns true if two tensor views are NOT equal.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
bool TensorNotEquals(
TensorView<Element, Layout> const &lhs,
TensorView<Element, Layout> const &rhs) {
// Extents must be identical
if (lhs.extent() != rhs.extent()) {
return true;
}
detail::TensorEqualsFunc<Element, Layout> func(lhs, rhs);
TensorForEach(
lhs.extent(),
func
);
return !bool(func);
}
/// Returns true if two planar-complex tensor views are NOT equal.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
bool TensorNotEquals(
TensorViewPlanarComplex<Element, Layout> const &lhs,
TensorViewPlanarComplex<Element, Layout> const &rhs) {
return !TensorEquals(lhs, rhs);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorContainsFunc {
//
// Data members
//
TensorView<Element, Layout> view;
Element value;
bool contains;
Coord<Layout::kRank> location;
//
// Methods
//
/// Ctor
TensorContainsFunc(): contains(false) { }
/// Ctor
TensorContainsFunc(
TensorView<Element, Layout> const &view_,
Element value_
) :
view(view_), value(value_), contains(false) { }
/// Visits a coordinate
void operator()(Coord<Layout::kRank> const &coord) {
if (view.at(coord) == value) {
if (!contains) {
location = coord;
}
contains = true;
}
}
  /// Returns true if the value was found
operator bool() const {
return contains;
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns true if a value is present in a tensor
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
bool TensorContains(
TensorView<Element, Layout> const & view,
Element value) {
detail::TensorContainsFunc<Element, Layout> func(
view,
value
);
TensorForEach(
view.extent(),
func
);
return bool(func);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns a pair containing a boolean indicating whether a value exists in a tensor and the location
/// of the first occurrence. If the value is not contained in the tensor, the second element of the
/// pair is undefined.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
std::pair<bool, Coord<Layout::kRank> > TensorFind(
TensorView<Element, Layout> const & view,
Element value) {
detail::TensorContainsFunc<Element, Layout> func(
view,
value
);
TensorForEach(
view.extent(),
func
);
return std::make_pair(bool(func), func.location);
}
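//
// Illustrative usage sketch (editor's addition, not part of the upstream header). Assumes
// "cutlass/util/host_tensor.h"; names and values are arbitrary, and the untouched elements
// are assumed to remain zero-initialized.
//
//   cutlass::HostTensor<int, cutlass::layout::RowMajor> tensor(cutlass::MatrixCoord(16, 16));
//   tensor.host_view().at({3, 5}) = 7;
//
//   bool found = cutlass::reference::host::TensorContains(tensor.host_view(), 7);
//   auto hit = cutlass::reference::host::TensorFind(tensor.host_view(), 7);
//   // found == hit.first == true; hit.second is the first visited coordinate holding 7.
//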
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
| 8,440 | C | 26.584967 | 100 | 0.561137 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/host/gemm_complex.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for complex-valued GEMM in host-side code.
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/complex.h"
#include "cutlass/numeric_types.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/matrix_coord.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
namespace cutlass {
namespace reference {
namespace host {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// Explicitly naming types needed by this template can be cumbersome, particularly for the
/// accumulator type, so a function argument 'initial_accum' is exposed. Passing
/// AccumulatorType(0) as the last function argument can be easier than naming all template
/// arguments explicitly.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename ConvertOp = NumericConverter<ElementC, ScalarType>,
typename InnerProductOp = multiply_add<ComputeType>
>
void GemmComplex(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
ComplexTransform transform_a,
TensorRef<ElementB, LayoutB> tensor_b,
ComplexTransform transform_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum,
int batch_count = 1,
int64_t batch_stride_A = 0,
int64_t batch_stride_B = 0,
int64_t batch_stride_C = 0,
int64_t batch_stride_D = 0) {
static_assert(
LayoutA::kRank == 2 &&
LayoutB::kRank == 2 &&
LayoutC::kRank == 2, "Tensors must be of rank 2");
  // Batches are computed sequentially; the tensor pointers are advanced by the batch strides below.
int const M = problem_size.m();
int const N = problem_size.n();
int const K = problem_size.k();
// Blocking necessary to speedup reference implementation
int const Mblock = 16;
int const Nblock = 16;
ConvertOp convert_op;
InnerProductOp inner_product_op;
for (int batch_idx = 0; batch_idx < batch_count; ++batch_idx) {
// Compute matrix product using blocks
for (int row_block = 0; row_block < M; row_block += Mblock) {
for (int col_block = 0; col_block < N; col_block += Nblock) {
ComputeType accum[Mblock][Nblock];
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
accum[i][j] = initial_accum;
}
}
for (int k_block = 0; k_block < K; ++k_block) {
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
if (row < M && col < N) {
ElementA a = tensor_a.at(MatrixCoord(row, k_block));
ElementB b = tensor_b.at(MatrixCoord(k_block, col));
ComputeType a_ik = ComputeType(a);
ComputeType b_kj = ComputeType(b);
if (transform_a == ComplexTransform::kConjugate) {
a_ik = conj(a_ik);
}
if (transform_b == ComplexTransform::kConjugate) {
b_kj = conj(b_kj);
}
accum[i][j] = inner_product_op(a_ik, b_kj, accum[i][j]);
}
}
}
}
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
MatrixCoord coord = MatrixCoord(row, col);
if (row < M && col < N) {
tensor_d.at(coord) = convert_op(
alpha * ScalarType(accum[i][j]) +
beta * ScalarType(tensor_c.at(coord)));
}
}
}
} // for (col_block)
} // for (row_block)
tensor_a.add_pointer_offset(batch_stride_A);
tensor_b.add_pointer_offset(batch_stride_B);
tensor_c.add_pointer_offset(batch_stride_C);
tensor_d.add_pointer_offset(batch_stride_D);
} // for (batch_idx)
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// This assumes the accumulator type is the same type as the scalars.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType
>
void GemmComplex(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
ComplexTransform transform_a,
TensorRef<ElementB, LayoutB> tensor_b,
ComplexTransform transform_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d) {
GemmComplex(problem_size, alpha, tensor_a, transform_a, tensor_b, transform_b, beta, tensor_c, tensor_d, ScalarType(0));
}
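//
// Illustrative usage sketch (editor's addition, not part of the upstream header). Assumes
// "cutlass/util/host_tensor.h"; extents follow the (m x k) * (k x n) = (m x n) convention and
// all sizes are arbitrary.
//
//   using Element = cutlass::complex<float>;
//   using Layout  = cutlass::layout::ColumnMajor;
//
//   cutlass::gemm::GemmCoord problem(64, 32, 16);                       // m, n, k
//   cutlass::HostTensor<Element, Layout> A(cutlass::MatrixCoord(64, 16));
//   cutlass::HostTensor<Element, Layout> B(cutlass::MatrixCoord(16, 32));
//   cutlass::HostTensor<Element, Layout> C(cutlass::MatrixCoord(64, 32));
//   cutlass::HostTensor<Element, Layout> D(cutlass::MatrixCoord(64, 32));
//
//   // ... fill A, B, and C, then compute D = 1 * conj(A) * B + 0 * C on the host:
//   cutlass::reference::host::GemmComplex(
//       problem, Element(1.0f),
//       A.host_ref(), cutlass::ComplexTransform::kConjugate,
//       B.host_ref(), cutlass::ComplexTransform::kNone,
//       Element(0.0f), C.host_ref(), D.host_ref());
//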
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
| 7,096 | C | 33.120192 | 122 | 0.611189 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/host/tensor_fill.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Provides several functions for filling tensors with data.
*/
#pragma once
// Standard Library includes
#include <utility>
#include <cstdlib>
#include <cmath>
// Cutlass includes
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/quaternion.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/subbyte_reference.h"
#include "cutlass/tensor_view.h"
#include "cutlass/tensor_view_planar_complex.h"
#include "cutlass/blas3.h"
#include "cutlass/util/distribution.h"
#include "tensor_foreach.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reference {
namespace host {
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
Element value;
//
// Methods
//
TensorFillFunc(
TensorView const &view_ = TensorView(),
Element value_ = Element(0)
): view(view_), value(value_) { }
void operator()(Coord<Layout::kRank> const & coord) const {
view.at(coord) = value;
}
};
/// Fills a caller-provided size-2 array with a pair of Gaussian-distributed values generated by the Box-Muller method
struct BoxMullerFunc {
BoxMullerFunc() {}
void operator()(
double* rnd, ///< Size-2 vector to be filled with random values
double mean = 0, ///< Mean of the Gaussian distribution
double stddev = 1, ///< Standard deviation of the Gaussian distribution
double pi = std::acos(-1)) const {
double u1 = double(std::rand()) / double(RAND_MAX);
double u2 = double(std::rand()) / double(RAND_MAX);
rnd[0] = std::sqrt(-2 * std::log(u1)) * std::cos(2 * pi * u2);
rnd[1] = std::sqrt(-2 * std::log(u1)) * std::sin(2 * pi * u2);
rnd[0] = mean + stddev * rnd[0];
rnd[1] = mean + stddev * rnd[1];
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with a uniform value
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFill(
TensorView<Element, Layout> dst, ///< destination tensor
Element val = Element(0)) { ///< value to uniformly fill it with
detail::TensorFillFunc<Element, Layout> func(dst, val);
TensorForEach(
dst.extent(),
func
);
}
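//
// Illustrative usage sketch (editor's addition, not part of the upstream header). Assumes
// "cutlass/util/host_tensor.h"; the element type and extent are arbitrary.
//
//   cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor> tensor(cutlass::MatrixCoord(8, 8));
//   cutlass::reference::host::TensorFill(tensor.host_view(), cutlass::half_t(0.5f));
//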
/// Fills a tensor with a uniform value
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFill(
TensorViewPlanarComplex<Element, Layout> dst, ///< destination tensor
cutlass::complex<Element> val = cutlass::complex<Element>(0)) { ///< value to uniformly fill it with
TensorFill(dst.view_real(), val.real());
TensorFill(dst.view_imag(), val.imag());
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <typename Element>
struct RandomGaussianFunc {
uint64_t seed;
double mean;
double stddev;
int int_scale;
double pi;
//
// Methods
//
RandomGaussianFunc(
uint64_t seed_ = 0,
double mean_ = 0,
double stddev_ = 1,
int int_scale_ = -1
):
seed(seed_), mean(mean_), stddev(stddev_), int_scale(int_scale_), pi(std::acos(-1)) {
std::srand((unsigned)seed);
}
/// Compute random value and update RNG state
Element operator()() const {
// Box-Muller transform to generate random numbers with Normal distribution
double u1 = double(std::rand()) / double(RAND_MAX);
double u2 = double(std::rand()) / double(RAND_MAX);
// Compute Gaussian random value
double rnd = std::sqrt(-2 * std::log(u1)) * std::cos(2 * pi * u2);
rnd = mean + stddev * rnd;
// Scale and convert final result
Element result;
if (int_scale >= 0) {
rnd = double(int64_t(rnd * double(1 << int_scale))) / double(1 << int_scale);
result = static_cast<Element>(rnd);
}
else {
result = static_cast<Element>(rnd);
}
return result;
}
};
/// Partial specialization for initializing a complex value.
template <typename Element>
struct RandomGaussianFunc<complex<Element> > {
uint64_t seed;
double mean;
double stddev;
int int_scale;
double pi;
//
// Methods
//
RandomGaussianFunc(
uint64_t seed_ = 0,
double mean_ = 0,
double stddev_ = 1,
int int_scale_ = -1
):
seed(seed_), mean(mean_), stddev(stddev_), int_scale(int_scale_), pi(std::acos(-1)) {
std::srand((unsigned)seed);
}
/// Compute random value and update RNG state
complex<Element> operator()() const {
Element reals[2];
double rnd[2];
detail::BoxMullerFunc func;
func(rnd, mean, stddev, pi);
if (int_scale >= 0) {
rnd[0] = double(int(rnd[0] * double(1 << int_scale)));
rnd[1] = double(int(rnd[1] * double(1 << int_scale)));
reals[0] = from_real<Element>(rnd[0] / double(1 << int_scale));
reals[1] = from_real<Element>(rnd[1] / double(1 << int_scale));
} else {
reals[0] = from_real<Element>(rnd[0]);
reals[1] = from_real<Element>(rnd[1]);
}
return complex<Element>(reals[0], reals[1]);
}
};
/// Partial specialization for initializing a complex value.
template <typename Element>
struct RandomGaussianFunc<Quaternion<Element> > {
uint64_t seed;
double mean;
double stddev;
int int_scale;
double pi;
//
// Methods
//
RandomGaussianFunc(
uint64_t seed_ = 0,
double mean_ = 0,
double stddev_ = 1,
int int_scale_ = -1
):
seed(seed_), mean(mean_), stddev(stddev_), int_scale(int_scale_), pi(std::acos(-1)) {
std::srand((unsigned)seed);
}
/// Compute random value and update RNG state
Quaternion<Element> operator()() const {
Element reals[4];
double rnd1[2];
double rnd2[2];
detail::BoxMullerFunc func;
func(rnd1, mean, stddev, pi);
func(rnd2, mean, stddev, pi);
if (int_scale >= 0) {
rnd1[0] = double(int(rnd1[0] * double(1 << int_scale)));
rnd1[1] = double(int(rnd1[1] * double(1 << int_scale)));
rnd2[0] = double(int(rnd2[0] * double(1 << int_scale)));
rnd2[1] = double(int(rnd2[1] * double(1 << int_scale)));
reals[0] = from_real<Element>(rnd1[0] / double(1 << int_scale));
reals[1] = from_real<Element>(rnd1[1] / double(1 << int_scale));
reals[2] = from_real<Element>(rnd2[0] / double(1 << int_scale));
reals[3] = from_real<Element>(rnd2[1] / double(1 << int_scale));
} else {
reals[0] = from_real<Element>(rnd1[0]);
reals[1] = from_real<Element>(rnd1[1]);
reals[2] = from_real<Element>(rnd2[0]);
reals[3] = from_real<Element>(rnd2[1]);
}
return Quaternion<Element>(reals[0], reals[1], reals[2], reals[3]);
}
};
/// Computes a random Gaussian distribution
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillGaussianFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
RandomGaussianFunc<Element> func;
//
// Methods
//
/// Construction of Gaussian RNG functor.
TensorFillGaussianFunc(
TensorView view_ = TensorView(),
RandomGaussianFunc<Element> func_ = RandomGaussianFunc<Element>()
):
view(view_), func(func_) {
}
/// Compute random value and update RNG state
void operator()(Coord<Layout::kRank> const &coord) const {
view.at(coord) = func();
}
};
/// Fills the upper or lower part of a symmetric rank-2 tensor with random values of a Gaussian distribution
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillSymmetricGaussianFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
RandomGaussianFunc<Element> func;
cutlass::FillMode fill_mode;
//
// Methods
//
/// Construction of Gaussian RNG functor.
TensorFillSymmetricGaussianFunc(
TensorView view_ = TensorView(),
RandomGaussianFunc<Element> func_ = RandomGaussianFunc<Element>(),
cutlass::FillMode fill_mode_ = cutlass::FillMode::kInvalid
):
view(view_), func(func_), fill_mode(fill_mode_) {
}
/// Compute random value and update RNG state
void operator()(Coord<Layout::kRank> const &coord) const {
// Fill half of matrix based on FillMode
if (Layout::kRank == 2 &&
fill_mode == cutlass::FillMode::kLower &&
coord[0] >= coord[1]) {
view.at(coord) = func();
} else if (Layout::kRank == 2 &&
fill_mode == cutlass::FillMode::kUpper &&
coord[0] <= coord[1]) {
view.at(coord) = func();
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values with a Gaussian distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomGaussian(
TensorView<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
double mean = 0, ///< Gaussian distribution's mean
double stddev = 1, ///< Gaussian distribution's standard deviation
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
detail::RandomGaussianFunc<Element> random_func(seed, mean, stddev, bits);
detail::TensorFillGaussianFunc<Element, Layout> func(
dst,
random_func
);
TensorForEach(
dst.extent(),
func
);
}
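//
// Illustrative usage sketch (editor's addition, not part of the upstream header). Assumes
// "cutlass/util/host_tensor.h"; the seed and moments are arbitrary.
//
//   cutlass::HostTensor<float, cutlass::layout::ColumnMajor> tensor(cutlass::MatrixCoord(128, 64));
//   cutlass::reference::host::TensorFillRandomGaussian(
//       tensor.host_view(), /*seed=*/2080, /*mean=*/0.0, /*stddev=*/2.0);
//
//   // Passing bits >= 0 (for example bits = 0) quantizes each sample to that many fractional
//   // bits, which helps when bit-exact comparison against another implementation is needed.
//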
/// Fills a tensor with random values with a Gaussian distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomGaussian(
TensorViewPlanarComplex<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
double mean = 0, ///< Gaussian distribution's mean
double stddev = 1, ///< Gaussian distribution's standard deviation
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
TensorFillRandomGaussian(dst.view_real(), seed, mean, stddev, bits);
TensorFillRandomGaussian(dst.view_imag(), ~seed, mean, stddev, bits);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills the upper or lower part of a symmetric rank-2 tensor with random values of a Gaussian distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillSymmetricRandomGaussian(
TensorView<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
cutlass::FillMode fill_mode, ///< FillMode for symmetric matrices
double mean = 0, ///< Gaussian distribution's mean
double stddev = 1, ///< Gaussian distribution's standard deviation
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
detail::RandomGaussianFunc<Element> random_func(seed, mean, stddev, bits);
detail::TensorFillSymmetricGaussianFunc<Element, Layout> func(
dst,
random_func,
fill_mode
);
TensorForEach(
dst.extent(),
func
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values of a Gaussian distribution.
template <
typename Element ///< Element type
>
void BlockFillRandomGaussian(
Element *ptr, ///< destination buffer
size_t capacity, ///< number of elements
uint64_t seed, ///< seed for RNG
double mean = 0, ///< Gaussian distribution's mean
double stddev = 1, ///< Gaussian distribution's standard deviation
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
detail::RandomGaussianFunc<Element> random_func(seed, mean, stddev, bits);
for (size_t i = 0; i < capacity; ++i) {
ReferenceFactory<Element>::get(ptr, i) = random_func();
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <typename Element>
struct RandomUniformFunc {
using Real = typename RealType<Element>::Type;
uint64_t seed;
double range;
double min;
int int_scale;
//
// Methods
//
RandomUniformFunc(
uint64_t seed_ = 0,
double max = 1,
double min_ = 0,
int int_scale_ = -1
):
seed(seed_), range(max - min_), min(min_), int_scale(int_scale_) {
std::srand((unsigned)seed);
}
/// Compute random value and update RNG state
Element operator()() const {
double rnd = double(std::rand()) / double(RAND_MAX);
rnd = min + range * rnd;
// Random values are cast to integer after scaling by a power of two to facilitate error
// testing
Element result;
if (int_scale >= 0) {
rnd = double(int64_t(rnd * double(1 << int_scale))) / double(1 << int_scale);
result = static_cast<Element>(Real(rnd));
}
else {
result = static_cast<Element>(Real(rnd));
}
return result;
}
};
/// Partial specialization for initializing a complex value.
template <typename Element>
struct RandomUniformFunc<complex<Element> > {
using Real = typename RealType<Element>::Type;
uint64_t seed;
double range;
double min;
int int_scale;
//
// Methods
//
RandomUniformFunc(
uint64_t seed_ = 0,
double max = 1,
double min_ = 0,
int int_scale_ = -1
):
seed(seed_), range(max - min_), min(min_), int_scale(int_scale_) {
std::srand((unsigned)seed);
}
/// Compute random value and update RNG state
complex<Element> operator()() const {
Element reals[2];
for (int i = 0; i < 2; ++i) {
double rnd = double(std::rand()) / double(RAND_MAX);
rnd = min + range * rnd;
// Random values are cast to integer after scaling by a power of two to facilitate error
// testing
if (int_scale >= 0) {
rnd = double(int(rnd * double(1 << int_scale)));
reals[i] = from_real<Element>(Real(rnd / double(1 << int_scale)));
}
else {
reals[i] = from_real<Element>(Real(rnd));
}
}
return complex<Element>(reals[0], reals[1]);
}
};
/// Partial specialization for initializing a Quaternion value.
template <typename Element>
struct RandomUniformFunc<Quaternion<Element> > {
using Real = typename RealType<Element>::Type;
uint64_t seed;
double range;
double min;
int int_scale;
//
// Methods
//
RandomUniformFunc(
uint64_t seed_ = 0,
double max = 1,
double min_ = 0,
int int_scale_ = -1
):
seed(seed_), range(max - min_), min(min_), int_scale(int_scale_) {
std::srand((unsigned)seed);
}
/// Compute random value and update RNG state
Quaternion<Element> operator()() const {
Element reals[4];
for (int i = 0; i < 4; ++i) {
double rnd = double(std::rand()) / double(RAND_MAX);
rnd = min + range * rnd;
// Random values are cast to integer after scaling by a power of two to facilitate error
// testing
if (int_scale >= 0) {
rnd = double(int(rnd * double(1 << int_scale)));
reals[i] = from_real<Element>(Real(rnd / double(1 << int_scale)));
}
else {
reals[i] = from_real<Element>(Real(rnd));
}
}
return make_Quaternion(reals[0], reals[1], reals[2], reals[3]);
}
};
/// Computes a random uniform distribution
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillRandomUniformFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
RandomUniformFunc<Element> func;
//
// Methods
//
/// Construction of uniform RNG functor.
TensorFillRandomUniformFunc(
TensorView view_ = TensorView(),
RandomUniformFunc<Element> func_ = RandomUniformFunc<Element>()
):
view(view_), func(func_) {
}
/// Compute random value and update RNG state
void operator()(Coord<Layout::kRank> const &coord) const {
view.at(coord) = func();
}
};
/// Fills the upper or lower part of a symmetric rank-2 tensor with random values of a uniform distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillSymmetricRandomUniformFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
RandomUniformFunc<Element> func;
cutlass::FillMode fill_mode;
//
// Methods
//
/// Construction of uniform RNG functor.
TensorFillSymmetricRandomUniformFunc(
TensorView view_ = TensorView(),
RandomUniformFunc<Element> func_ = RandomUniformFunc<Element>(),
cutlass::FillMode fill_mode_ = cutlass::FillMode::kInvalid
):
view(view_), func(func_), fill_mode(fill_mode_) {
}
/// Compute random value and update RNG state
void operator()(Coord<Layout::kRank> const &coord) const {
// Fill half of matrix based on FillMode
if (Layout::kRank == 2 &&
fill_mode == cutlass::FillMode::kLower &&
coord[0] >= coord[1]) {
view.at(coord) = func();
} else if (Layout::kRank == 2 &&
fill_mode == cutlass::FillMode::kUpper &&
coord[0] <= coord[1]) {
view.at(coord) = func();
}
}
};
/// Computes a random Uniform distribution and pads diagonal with zeros
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillPadDiagonalRandomUniformFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
RandomUniformFunc<Element> func;
cutlass::FillMode fill_mode;
int alignment;
//
// Methods
//
/// Construction of uniform RNG functor.
TensorFillPadDiagonalRandomUniformFunc(
TensorView view_ = TensorView(),
RandomUniformFunc<Element> func_ = RandomUniformFunc<Element>(),
cutlass::FillMode fill_mode_ = cutlass::FillMode::kInvalid,
int alignment_ = 1
):
view(view_), func(func_), fill_mode(fill_mode_), alignment(alignment_) {
}
/// Compute random value and update RNG state
void operator()(Coord<Layout::kRank> const &coord) const {
// Fill half of matrix based on FillMode
if (Layout::kRank == 2 &&
(fill_mode == cutlass::FillMode::kLower) &&
(coord[0] >= coord[1]) ||
((coord[1] - coord[0]) >= alignment)) {
view.at(coord) = func();
} else if (Layout::kRank == 2 &&
fill_mode == cutlass::FillMode::kUpper &&
(coord[0] <= coord[1]) ||
((coord[0] - coord[1]) >= alignment)) {
view.at(coord) = func();
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values of a uniform random distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomUniform(
TensorView<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
double max = 1, ///< upper bound of distribution
double min = 0, ///< lower bound for distribution
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
detail::RandomUniformFunc<Element> random_func(seed, max, min, bits);
detail::TensorFillRandomUniformFunc<Element, Layout> func(
dst,
random_func
);
TensorForEach(
dst.extent(),
func
);
}
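//
// Illustrative usage sketch (editor's addition, not part of the upstream header). Assumes
// "cutlass/util/host_tensor.h". With bits = 0 the samples are quantized to whole numbers
// before conversion, which keeps the int8_t operand values exactly representable.
//
//   cutlass::HostTensor<int8_t, cutlass::layout::RowMajor> tensor(cutlass::MatrixCoord(32, 32));
//   cutlass::reference::host::TensorFillRandomUniform(
//       tensor.host_view(), /*seed=*/7, /*max=*/3, /*min=*/-3, /*bits=*/0);
//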
/// Fills a tensor with random values of a uniform random distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomUniform(
TensorViewPlanarComplex<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
double max = 1, ///< upper bound of distribution
double min = 0, ///< lower bound for distribution
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
TensorFillRandomUniform(dst.view_real(), seed, max, min, bits);
TensorFillRandomUniform(dst.view_imag(), ~seed, max, min, bits);
}
/// Fills a tensor with random values with a uniform random distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomUniform(
TensorView<Quaternion<Element>, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
double max = 1, ///< upper bound of distribution
double min = 0, ///< lower bound for distribution
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
detail::RandomUniformFunc<Quaternion<Element>> random_func(seed, max, min, bits);
detail::TensorFillRandomUniformFunc<Quaternion<Element>, Layout> func(
dst,
random_func
);
TensorForEach(
dst.extent(),
func
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values with a uniform random distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillSymmetricRandomUniform(
TensorView<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
cutlass::FillMode fill_mode, ///< FillMode for symmetric matrices
double max = 1, ///< upper bound of distribution
double min = 0, ///< lower bound for distribution
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
detail::RandomUniformFunc<Element> random_func(seed, max, min, bits);
detail::TensorFillSymmetricRandomUniformFunc<Element, Layout> func(
dst,
random_func,
fill_mode
);
TensorForEach(
dst.extent(),
func
);
}
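//
// Illustrative usage sketch (editor's addition, not part of the upstream header). Only the
// lower triangle (including the diagonal) is written; the upper triangle keeps its prior
// contents. Assumes "cutlass/util/host_tensor.h".
//
//   cutlass::HostTensor<double, cutlass::layout::ColumnMajor> tensor(cutlass::MatrixCoord(64, 64));
//   cutlass::reference::host::TensorFillSymmetricRandomUniform(
//       tensor.host_view(), /*seed=*/99, cutlass::FillMode::kLower, /*max=*/1.0, /*min=*/-1.0);
//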
/// Fills a tensor with random values with a uniform random distribution pads zeros along diagonal
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillPadDiagonalRandomUniform(
TensorView<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
cutlass::FillMode fill_mode, ///< FillMode for symmetric matrices
double max = 1, ///< upper bound of distribution
double min = 0, ///< lower bound for distribution
int bits = -1, ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
int alignment = 1
) {
detail::RandomUniformFunc<Element> random_func(seed, max, min, bits);
detail::TensorFillPadDiagonalRandomUniformFunc<Element, Layout> func(
dst,
random_func,
fill_mode,
alignment
);
TensorForEach(
dst.extent(),
func
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values with a uniform random distribution.
template <
typename Element ///< Element type
>
void BlockFillRandomUniform(
Element *ptr,
size_t capacity,
uint64_t seed, ///< seed for RNG
double max = 1, ///< upper bound of distribution
double min = 0, ///< lower bound for distribution
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
detail::RandomUniformFunc<Element> random_func(seed, max, min, bits);
for (size_t i = 0; i < capacity; ++i) {
ReferenceFactory<Element>::get(ptr, i) = random_func();
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillDiagonalFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
Element diag;
Element other;
//
// Methods
//
TensorFillDiagonalFunc(
TensorView const &view_ = TensorView(),
Element diag_ = Element(1),
Element other_ = Element(0)
):
view(view_), diag(diag_), other(other_) { }
void operator()(Coord<Layout::kRank> const & coord) const {
bool is_diag = true;
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
if (coord[i] != coord[i - 1]) {
is_diag = false;
break;
}
}
view.at(coord) = (is_diag ? diag : other);
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor everywhere with a unique value for its diagonal.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillDiagonal(
TensorView<Element, Layout> dst, ///< destination tensor
Element diag = Element(1), ///< value to write in the diagonal
Element other = Element(0)) { ///< value to write off the diagonal
detail::TensorFillDiagonalFunc<Element, Layout> func(
dst,
diag,
other
);
TensorForEach(
dst.extent(),
func
);
}
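//
// Illustrative usage sketch (editor's addition, not part of the upstream header). Writes 2 on
// the diagonal and -1 everywhere else. Assumes "cutlass/util/host_tensor.h".
//
//   cutlass::HostTensor<float, cutlass::layout::RowMajor> tensor(cutlass::MatrixCoord(16, 16));
//   cutlass::reference::host::TensorFillDiagonal(tensor.host_view(), 2.0f, -1.0f);
//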
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper to fill a tensor's diagonal with 1 and 0 everywhere else.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillIdentity(
TensorView<Element, Layout> dst) { ///< destination tensor
TensorFillDiagonal(dst, Element(1), Element(0));
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Writes a uniform value to the diagonal of a tensor without modifying off-diagonal elements.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorUpdateDiagonal(
TensorView<Element, Layout> dst, ///< destination tensor
Element val = Element(1)) {
typename Layout::Index extent = dst.extent().min();
for (typename Layout::Index i = 0; i < extent; ++i) {
Coord<Layout::kRank> coord(i);
dst.at(coord) = val;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorUpdateOffDiagonalFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
Element other;
//
// Methods
//
TensorUpdateOffDiagonalFunc(
TensorView const &view_ = TensorView(),
Element other_ = Element(0)
):
view(view_), other(other_) { }
void operator()(Coord<Layout::kRank> const & coord) const {
bool is_diag = true;
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
if (coord[i] != coord[i - 1]) {
is_diag = false;
break;
}
}
if (!is_diag) {
view.at(coord) = other;
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Writes a uniform value to all elements in the tensor without modifying diagonal elements.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorUpdateOffDiagonal(
TensorView<Element, Layout> dst, ///< destination tensor
Element other = Element(1)) {
detail::TensorUpdateOffDiagonalFunc<Element, Layout> func(
dst,
other
);
TensorForEach(
dst.extent(),
func
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillLinearFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
Array<Element, Layout::kRank> v;
Element s;
//
// Methods
//
TensorFillLinearFunc() { }
/// Constructs functor
TensorFillLinearFunc(
TensorView const &view_,
Array<Element, Layout::kRank> const & v_,
Element s_ = Element(0)
):
view(view_), v(v_), s(s_) { }
/// Updates the tensor
void operator()(Coord<Layout::kRank> const & coord) const {
Element sum(s);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank; ++i) {
sum += Element(coord[i]) * v[i];
}
view.at(coord) = sum;
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills tensor with a linear combination of its coordinate and another vector
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillLinear(
TensorView<Element, Layout> dst, ///< destination tensor
Array<Element, Layout::kRank> const & v,
Element s = Element(0)) {
detail::TensorFillLinearFunc<Element, Layout> func(
dst,
v,
s
);
TensorForEach(
dst.extent(),
func
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with sequentially increasing values, assigning each element a unique index derived from its coordinates
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillSequential(
TensorView<Element, Layout> dst, ///< destination tensor
Element s = Element(0)) {
Array<Element, Layout::kRank> stride;
stride[0] = Element(1);
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
stride[i] = stride[i - 1] * Element(dst.extent()[i - 1]);
}
TensorFillLinear(dst, stride, s);
}
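//
// Illustrative usage sketch (editor's addition, not part of the upstream header). The strides
// are accumulated in coordinate order (rank 0 fastest), independent of the memory layout.
// Assumes "cutlass/util/host_tensor.h".
//
//   cutlass::HostTensor<int, cutlass::layout::RowMajor> tensor(cutlass::MatrixCoord(2, 3));
//   cutlass::reference::host::TensorFillSequential(tensor.host_view());
//   // For this 2x3 extent, the element at coordinate (r, c) now holds r + 2 * c.
//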
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a block of data with sequential elements
template <
typename Element
>
void BlockFillSequential(
Element *ptr,
int64_t capacity,
Element v = Element(1),
Element s = Element(0)) {
  int64_t i = 0;
while (i < capacity) {
cutlass::ReferenceFactory<Element, (cutlass::sizeof_bits<Element>::value <
8)>::get(ptr, i) = s;
s = Element(s + v);
++i;
}
}
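//
// Illustrative usage sketch (editor's addition, not part of the upstream header). Fills a raw
// host buffer with the arithmetic sequence s, s + v, s + 2v, ...; the buffer is arbitrary.
//
//   std::vector<float> buffer(1024);
//   cutlass::reference::host::BlockFillSequential(
//       buffer.data(), int64_t(buffer.size()), /*v=*/1.0f, /*s=*/0.0f);
//   // buffer[i] == float(i)
//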
/// Fills a block of data with sequential elements
template <
typename Element
>
void BlockFillSequentialModN(
Element *ptr,
int64_t capacity,
int64_t mod,
int64_t v = int64_t(1),
int64_t s = int64_t(0)) {
  int64_t i = 0;
while (i < capacity) {
cutlass::ReferenceFactory<Element, (cutlass::sizeof_bits<Element>::value <
8)>::get(ptr, i) = Element(s);
s = int64_t(s + v) % mod;
++i;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a block of data with random values drawn from the given Distribution (Gaussian or uniform)
template <
typename Element
>
void BlockFillRandom(
Element *ptr,
size_t capacity,
uint64_t seed,
Distribution dist) {
if (dist.kind == Distribution::Gaussian) {
BlockFillRandomGaussian<Element>(
ptr,
capacity,
seed,
dist.gaussian.mean,
dist.gaussian.stddev,
dist.int_scale);
}
else if (dist.kind == Distribution::Uniform) {
BlockFillRandomUniform<Element>(
ptr,
capacity,
seed,
dist.uniform.max,
dist.uniform.min,
dist.int_scale);
}
}
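//
// Illustrative usage sketch (editor's addition, not part of the upstream header). Distribution
// (from cutlass/util/distribution.h) selects the distribution at run time; its set_uniform /
// set_gaussian helpers populate the corresponding fields.
//
//   std::vector<float> buffer(4096);
//   cutlass::Distribution dist;
//   dist.set_uniform(/*min=*/-4.0, /*max=*/4.0);
//   cutlass::reference::host::BlockFillRandom(buffer.data(), buffer.size(), /*seed=*/31, dist);
//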
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <typename Element>
struct RandomSparseMetaFunc {
uint64_t seed;
int range;
int MetaSizeInBits;
//
// Methods
//
RandomSparseMetaFunc(
uint64_t seed_ = 0,
int MetaSizeInBits_ = 2
):
seed(seed_), MetaSizeInBits(MetaSizeInBits_) {
std::srand((unsigned)seed);
if (MetaSizeInBits_ == 2) {
range = 6;
} else if (MetaSizeInBits_ == 4) {
range = 2;
}
}
/// Compute random value and update RNG state
Element operator()() const {
Element FourToTwoMeta[6] = {0x4, 0x8, 0x9, 0xc, 0xd, 0xe};
Element TwoToOneMeta[2] = {0x4, 0xe};
Element * MetaArray = (MetaSizeInBits == 2) ? FourToTwoMeta : TwoToOneMeta;
Element result = 0x0;
for (int i = 0; i < cutlass::sizeof_bits<Element>::value / 4; ++i) {
int rnd = std::rand() % range;
Element meta = MetaArray[rnd];
result = (Element)(result | ((Element)(meta << (i * 4))));
}
return result;
}
};
/// Computes a random sparse meta
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillRandomSparseMetaFunc {
using TensorView = TensorView<Element, Layout>;
//
// Data members
//
TensorView view;
RandomSparseMetaFunc<Element> func;
//
// Methods
//
/// Construction of Gaussian RNG functor.
TensorFillRandomSparseMetaFunc(
TensorView view_ = TensorView(),
RandomSparseMetaFunc<Element> func_ = RandomSparseMetaFunc<Element>()
):
view(view_), func(func_) {
}
/// Compute random value and update RNG state
void operator()(Coord<Layout::kRank> const &coord) const {
view.at(coord) = func();
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random sparse-metadata values (2-bit or 4-bit encodings used by structured-sparse kernels).
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomSparseMeta(
TensorView<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
int MetaSizeInBits) { ///< 2 bit or 4 bit
detail::RandomSparseMetaFunc<Element> random_func(seed, MetaSizeInBits);
detail::TensorFillRandomSparseMetaFunc<Element, Layout> func(
dst,
random_func
);
TensorForEach(
dst.extent(),
func
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a block of memory with random sparse-metadata values (2-bit or 4-bit encodings).
template <
typename Element ///< Element type
>
void BlockFillRandomSparseMeta(
Element *ptr,
size_t capacity,
uint64_t seed, ///< seed for RNG
int MetaSizeInBits) { ///< 2 bit or 4bit
detail::RandomSparseMetaFunc<Element> random_func(seed, MetaSizeInBits);
for (size_t i = 0; i < capacity; ++i) {
ptr[i] = random_func();
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a Blocked-ELL column-index matrix with random, strictly increasing column indices per row, padding exhausted rows with -1.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomEllIdx(
TensorView<Element, Layout> dst, ///< destination tensor
uint64_t seed, ///< seed for RNG
int rows, int ell_cols, int cols) { ///< dimension of the matrix
std::srand((unsigned)seed);
for (int i = 0; i < rows; ++i) {
int col_idx = std::rand() % cols;
for (int j = 0; j < ell_cols; ++j) {
dst.at({i, j}) = col_idx;
if (col_idx != -1) {
if (col_idx == (cols - 1)) {
col_idx = -1;
} else {
col_idx = std::rand() % (cols - col_idx - 1) + col_idx + 1;
}
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Copies a diagonal in from host memory without modifying off-diagonal elements.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorCopyDiagonalIn(
TensorView<Element, Layout> dst, ///< destination tensor
Element const *ptr) { ///< dense buffer of elements
typename Layout::Index extent = dst.extent().min();
for (typename Layout::Index i = 0; i < extent; ++i) {
Coord<Layout::kRank> coord(i);
dst.at(coord) = ReferenceFactory<Element>::get(ptr, i);
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Copies the diagonal of a tensor into a dense buffer in host memory.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorCopyDiagonalOut(
Element *ptr, ///< dense buffer of elements
TensorView<Element, Layout> src) { ///< source tensor
typename Layout::Index extent = src.extent().min();
for (typename Layout::Index i = 0; i < extent; ++i) {
Coord<Layout::kRank> coord(i);
ReferenceFactory<Element>::get(ptr, i) = src.at(coord);
}
}
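//
// Illustrative usage sketch (editor's addition, not part of the upstream header). Round-trips
// the diagonal of a square matrix through a plain host array. Assumes "cutlass/util/host_tensor.h".
//
//   cutlass::HostTensor<float, cutlass::layout::RowMajor> tensor(cutlass::MatrixCoord(8, 8));
//   std::vector<float> diag(8);
//   cutlass::reference::host::TensorCopyDiagonalOut(diag.data(), tensor.host_view());
//   // ... modify diag ...
//   cutlass::reference::host::TensorCopyDiagonalIn(tensor.host_view(), diag.data());
//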
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
| 43,961 | C | 28.926481 | 118 | 0.529538 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/host/tensor_elementwise.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines host-side elementwise operations on TensorView.
*/
#pragma once
// Cutlass includes
#include "cutlass/cutlass.h"
#include "cutlass/functional.h"
#include "tensor_foreach.h"
namespace cutlass {
namespace reference {
namespace host {
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper functor that applies a binary operator elementwise: d = func(a, b)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementD,
typename LayoutD,
typename BinaryFunc>
struct TensorFuncBinaryOp {
//
// Data members
//
/// View of left-hand-side tensor
TensorView<ElementD, LayoutD> view_d;
TensorRef<ElementA, LayoutA> view_a;
TensorRef<ElementB, LayoutB> view_b;
BinaryFunc func;
//
// Methods
//
/// Constructor
TensorFuncBinaryOp() { }
/// Constructor
TensorFuncBinaryOp(
TensorView<ElementD, LayoutD> const & view_d_,
TensorRef<ElementA, LayoutA> const & view_a_,
TensorRef<ElementB, LayoutB> const & view_b_,
BinaryFunc func = BinaryFunc()
):
view_d(view_d_), view_a(view_a_), view_b(view_b_), func(func) { }
  /// Applies the binary operator to the elements at the given coordinate
void operator()(Coord<LayoutD::kRank> const &coord) const {
view_d.at(coord) = func(
ElementD(view_a.at(coord)),
ElementD(view_b.at(coord))
);
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Adds two tensors and stores in the destination tensor: d = a + b
template <
typename ElementD,
typename LayoutD,
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB
>
void TensorAdd(
TensorView<ElementD, LayoutD> d, ///< destination tensor view
TensorRef<ElementA, LayoutA> a, ///< A tensor reference
TensorRef<ElementB, LayoutB> b ///< B tensor reference
) {
detail::TensorFuncBinaryOp<
ElementD,
LayoutD,
ElementA,
LayoutA,
ElementB,
LayoutB,
cutlass::plus<ElementD>
> func(d, a, b);
TensorForEach(
d.extent(),
func);
}
/// Adds a tensor in place: d = d .+ a
template <
typename ElementD,
typename LayoutD,
typename ElementA,
typename LayoutA
>
void TensorAdd(
TensorView<ElementD, LayoutD> d, ///< destination tensor view
TensorRef<ElementA, LayoutA> a ///< A tensor reference
) {
TensorAdd(d, d, a);
}
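//
// Illustrative usage sketch (editor's addition, not part of the upstream header). Computes
// d = a + b elementwise on the host; the two-argument overload reuses the destination as the
// left operand. Assumes "cutlass/util/host_tensor.h".
//
//   cutlass::HostTensor<float, cutlass::layout::RowMajor> a(cutlass::MatrixCoord(4, 4));
//   cutlass::HostTensor<float, cutlass::layout::RowMajor> b(cutlass::MatrixCoord(4, 4));
//   cutlass::HostTensor<float, cutlass::layout::RowMajor> d(cutlass::MatrixCoord(4, 4));
//   // ... fill a and b ...
//   cutlass::reference::host::TensorAdd(d.host_view(), a.host_ref(), b.host_ref());
//   cutlass::reference::host::TensorAdd(d.host_view(), a.host_ref());   // d = d + a
//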
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Subtracts two tensors and stores in the destination tensor: d = a - b
template <
typename ElementD,
typename LayoutD,
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB
>
void TensorSub(
TensorView<ElementD, LayoutD> d, ///< destination tensor view
TensorRef<ElementA, LayoutA> a, ///< A tensor reference
TensorRef<ElementB, LayoutB> b ///< B tensor reference
) {
detail::TensorFuncBinaryOp<
ElementD,
LayoutD,
ElementA,
LayoutA,
ElementB,
LayoutB,
cutlass::minus<ElementD>
> func(d, a, b);
TensorForEach(
d.extent(),
func);
}
/// Subtracts two tensors in place: d = d .- a
template <
typename ElementD,
typename LayoutD,
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB
>
void TensorSub(
TensorView<ElementD, LayoutD> d, ///< destination tensor view
TensorRef<ElementA, LayoutA> a ///< A tensor reference
) {
TensorSub(d, d, a);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Multiplies two tensors and stores in the destination tensor: d = a .* b
template <
typename ElementD,
typename LayoutD,
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB
>
void TensorMul(
TensorView<ElementD, LayoutD> d, ///< destination tensor view
TensorRef<ElementA, LayoutA> a, ///< A tensor reference
TensorRef<ElementB, LayoutB> b ///< B tensor reference
) {
detail::TensorFuncBinaryOp<
ElementD,
LayoutD,
ElementA,
LayoutA,
ElementB,
LayoutB,
cutlass::multiplies<ElementD>
> func(d, a, b);
TensorForEach(
d.extent(),
func);
}
/// Multiplies tensors in place: d = d .* a
template <
typename ElementD,
typename LayoutD,
typename ElementA,
typename LayoutA
>
void TensorMul(
TensorView<ElementD, LayoutD> d, ///< destination tensor view
TensorRef<ElementA, LayoutA> a ///< A tensor reference
) {
TensorMul(d, d, a);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Divides two tensors and stores in the destination tensor: d = a ./ b
template <
typename ElementD,
typename LayoutD,
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB
>
void TensorDiv(
TensorView<ElementD, LayoutD> d, ///< destination tensor view
TensorRef<ElementA, LayoutA> a, ///< A tensor reference
TensorRef<ElementB, LayoutB> b ///< B tensor reference
) {
detail::TensorFuncBinaryOp<
ElementD,
LayoutD,
ElementA,
LayoutA,
ElementB,
LayoutB,
cutlass::divides<ElementD>
> func(d, a, b);
TensorForEach(
d.extent(),
func);
}
/// Divides tensors in place: d = d ./ a
template <
typename ElementD,
typename LayoutD,
typename ElementA,
typename LayoutA
>
void TensorDiv(
TensorView<ElementD, LayoutD> d, ///< destination tensor view
TensorRef<ElementA, LayoutA> a ///< A tensor reference
) {
TensorDiv(d, d, a);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Elementwise modulus of two tensors stored in the destination tensor. Note: the reference
/// implementation below currently reuses cutlass::divides, so it computes d = a ./ b.
template <
typename ElementD,
typename LayoutD,
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB
>
void TensorModulus(
TensorView<ElementD, LayoutD> d, ///< destination tensor view
TensorRef<ElementA, LayoutA> a, ///< A tensor reference
TensorRef<ElementB, LayoutB> b ///< B tensor reference
) {
detail::TensorFuncBinaryOp<
ElementD,
LayoutD,
ElementA,
LayoutA,
ElementB,
LayoutB,
cutlass::divides<ElementD>
> func(d, a, b);
TensorForEach(
d.extent(),
func);
}
/// In-place variant of TensorModulus. Note: it currently forwards to TensorDiv, computing d = d ./ a.
template <
typename ElementD,
typename LayoutD,
typename ElementA,
typename LayoutA
>
void TensorModulus(
TensorView<ElementD, LayoutD> d, ///< destination tensor view
TensorRef<ElementA, LayoutA> a ///< A tensor reference
) {
  TensorModulus(d, d, a);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
| 9,027 | C | 25.397661 | 100 | 0.59311 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/host/gemm.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for GEMM in host-side code.
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/numeric_types.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/arch/mma.h"
#include "cutlass/util/host_tensor.h"
namespace cutlass {
namespace reference {
namespace host {
template<typename Out, typename In>
struct CastIfScalar {
static Out cast(In in) {
return Out(in);
}
};
template<typename OutScalar, typename In>
struct CastIfScalar<cutlass::complex<OutScalar>, In> {
typedef cutlass::complex<OutScalar> Out;
static Out cast(In in) {
return Out(static_cast<OutScalar>(in));
}
};
template<typename OutScalar, typename InScalar>
struct CastIfScalar<cutlass::complex<OutScalar>, cutlass::complex<InScalar>> {
typedef cutlass::complex<OutScalar> Out;
typedef cutlass::complex<InScalar> In;
static Out cast(In in) {
return Out(in);
}
};
template<typename Out, typename In>
Out cast_if_scalar(In in) {
return CastIfScalar<Out, In>::cast(in);
}
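//
// Example of the conversion helper above (a hedged sketch; the concrete types are illustrative
// assumptions):
//
//   namespace ref_host = cutlass::reference::host;
//
//   float f = ref_host::cast_if_scalar<float>(3);   // scalar-to-scalar: plain Out(in) construction
//
//   // scalar-to-complex: the scalar becomes the real part, the imaginary part is zero
//   cutlass::complex<float> c = ref_host::cast_if_scalar<cutlass::complex<float>>(3.0);
//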
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = multiply_add<ComputeType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_gemm(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum) {
static_assert(
LayoutA::kRank == 2 &&
LayoutB::kRank == 2 &&
LayoutC::kRank == 2, "Tensors must be of rank 2");
// Note: batch is ignored.
int const M = problem_size.m();
int const N = problem_size.n();
int const K = problem_size.k();
// Blocking necessary to speedup reference implementation
int const Mblock = 16;
int const Nblock = 16;
ConvertOp convert_op;
InnerProductOp inner_product_op;
for (int row_block = 0; row_block < M; row_block += Mblock) {
for (int col_block = 0; col_block < N; col_block += Nblock) {
ComputeType accum[Mblock][Nblock];
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
accum[i][j] = initial_accum;
}
}
for (int k_block = 0; k_block < K; ++k_block) {
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
if (row < M && col < N) {
ElementA a = tensor_a.at(MatrixCoord(row, k_block));
ElementB b = tensor_b.at(MatrixCoord(k_block, col));
ComputeType compute_a(cast_if_scalar<ComputeType>(a));
ComputeType compute_b(cast_if_scalar<ComputeType>(b));
accum[i][j] = inner_product_op(compute_a, compute_b, accum[i][j]);
}
}
}
}
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
MatrixCoord coord = MatrixCoord(row, col);
if (row < M && col < N) {
tensor_d.at(coord) = convert_op(
alpha * ScalarType(accum[i][j]) +
beta * ScalarType(tensor_c.at(coord)));
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = multiply_add<ComputeType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_gemm(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum) {
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, InnerProductOp, ConvertOp>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_c,
initial_accum);
}
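//
// Usage sketch for compute_gemm (illustrative assumptions: float data, column-major layouts, and
// cutlass::HostTensor from "cutlass/util/host_tensor.h" for storage; any rank-2 TensorRefs work):
//
//   int M = 64, N = 32, K = 16;
//   cutlass::HostTensor<float, cutlass::layout::ColumnMajor> A({M, K});
//   cutlass::HostTensor<float, cutlass::layout::ColumnMajor> B({K, N});
//   cutlass::HostTensor<float, cutlass::layout::ColumnMajor> C({M, N});
//
//   // C = 1.0f * A * B + 0.0f * C, accumulating in float starting from zero
//   cutlass::reference::host::compute_gemm(
//       cutlass::gemm::GemmCoord(M, N, K), 1.0f,
//       A.host_ref(), B.host_ref(), 0.0f, C.host_ref(), float(0));
//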
////////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
typename InnerProductOp = cutlass::arch::OpMultiplyAdd
>
struct Gemm;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
ComputeType, arch::OpMultiplyAdd> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
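//
// Usage sketch for the functor form above (hedged; it assumes A and B are cutlass::half_t
// HostTensors, C is a float HostTensor, and M, N, K, alpha, beta are defined as in the
// compute_gemm sketch earlier in this header):
//
//   using ReferenceGemm = cutlass::reference::host::Gemm<
//       cutlass::half_t, cutlass::layout::ColumnMajor,   // A
//       cutlass::half_t, cutlass::layout::ColumnMajor,   // B
//       float, cutlass::layout::ColumnMajor,             // C / D
//       float,                                           // ScalarType
//       float>;                                          // ComputeType (OpMultiplyAdd by default)
//
//   ReferenceGemm reference_gemm;
//   reference_gemm(cutlass::gemm::GemmCoord(M, N, K), alpha,
//                  A.host_ref(), B.host_ref(), beta, C.host_ref());
//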
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add (arch::OpMultiplyAddFastBF16)
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
ComputeType, arch::OpMultiplyAddFastBF16> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add-saturate
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
ComputeType, arch::OpMultiplyAddSaturate> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>,
NumericConverterClamp<ElementC, ScalarType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, multiply_add<ComputeType>,
NumericConverterClamp<ElementC, ScalarType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for XOR-popc
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
ComputeType, arch::OpXorPopc> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, xor_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, ComputeType, xor_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Batched GEMM
//
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a batch of GEMMs over a set of matrices of common dimension.
//
// TensorRefCollection* is a type satisfying the TensorRefCollection concept.
//
template <
typename TensorRefCollectionA,
typename TensorRefCollectionB,
typename TensorRefCollectionC,
typename ScalarType,
typename AccumulatorType
>
void BatchedGemm(
gemm::GemmCoord problem_size,
int batch_count,
ScalarType alpha,
TensorRefCollectionA const& tensor_a,
TensorRefCollectionB const& tensor_b,
ScalarType beta,
TensorRefCollectionC &tensor_c,
AccumulatorType initial_accum) {
typename TensorRefCollectionA::ConstIterator tensor_a_it = tensor_a.begin();
typename TensorRefCollectionB::ConstIterator tensor_b_it = tensor_b.begin();
typename TensorRefCollectionC::ConstIterator tensor_c_it = tensor_c.begin();
for (int batch = 0;
batch < batch_count;
++batch, ++tensor_a_it, ++tensor_b_it, ++tensor_c_it) {
Gemm<typename TensorRefCollectionA::Element,
typename TensorRefCollectionA::Layout,
typename TensorRefCollectionB::Element,
typename TensorRefCollectionB::Layout,
typename TensorRefCollectionC::Element,
typename TensorRefCollectionC::Layout,
typename TensorRefCollectionC::Element,
typename TensorRefCollectionC::Element>
gemm;
gemm(problem_size, alpha, *tensor_a_it, *tensor_b_it, beta, *tensor_c_it,
initial_accum);
}
}
/// Computes a batch of GEMMs over a set of matrices of common dimension, with the accumulator
/// initialized to zero.
//
// TensorRefCollection* is a type satisfying the TensorRefCollection concept.
//
template <
typename TensorRefCollectionA,
typename TensorRefCollectionB,
typename TensorRefCollectionC,
typename ScalarType,
typename AccumulatorType
>
void BatchedGemm(
gemm::GemmCoord problem_size,
int batch_count,
ScalarType alpha,
TensorRefCollectionA const& tensor_a,
TensorRefCollectionB const& tensor_b,
ScalarType beta,
TensorRefCollectionC &tensor_c) {
BatchedGemm(problem_size, batch_count, alpha, tensor_a, tensor_b, beta, tensor_c, ScalarType(0));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
| 17,163 | C | 36.806167 | 100 | 0.619997 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/host/tensor_copy.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Defines host-side tensor copy operations on TensorView and TensorRef objects.
*/
#pragma once
// Standard Library includes
#include <utility>
// Cutlass includes
#include "cutlass/cutlass.h"
#include "tensor_foreach.h"
namespace cutlass {
namespace reference {
namespace host {
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Helper to convert between types
template <
typename DstElement,
typename SrcElement
>
struct TrivialConvert {
TrivialConvert() { }
DstElement operator()(SrcElement src) const {
return DstElement(src);
}
};
/// Helper to conditionally copy between tensor views.
template <
typename DstElement,
typename DstLayout,
typename SrcElement,
typename SrcLayout,
typename F
>
struct TensorCopyIf {
using DstTensorView = TensorView<DstElement, DstLayout>;
using SrcTensorView = TensorView<SrcElement, SrcLayout>;
//
// Data members
//
DstTensorView dst;
SrcTensorView src;
F convert;
//
// Methods
//
TensorCopyIf() { }
TensorCopyIf(
DstTensorView const &dst_,
SrcTensorView const &src_,
F const &convert_): dst(dst_), src(src_), convert(convert_) {}
/// Copies based on destination and source bounds
void operator()(Coord<DstLayout::kRank> const &coord) {
if (dst.contains(coord) && src.contains(coord)) {
dst.at(coord) = convert(src.at(coord));
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Copies elements from one tensor view into another, satisfying bounds of each tensor.
template <
typename DstElement, /// Destination tensor's element type
typename DstLayout, /// Destination tensor's layout
typename SrcElement, /// Source tensor's element type
typename SrcLayout, /// Source tensor's layout
typename F /// Transformation functor
>
void TensorCopy(
TensorView<DstElement, DstLayout> dst,
TensorView<SrcElement, SrcLayout> src,
F const &transform) {
using CopyIf = detail::TensorCopyIf<
DstElement,
DstLayout,
SrcElement,
SrcLayout,
F>;
CopyIf copy_if(dst, src, transform);
TensorForEach(dst.extent(), copy_if);
}
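//
// Usage sketch (assumptions for illustration: a float source HostTensor 'src', a cutlass::half_t
// destination HostTensor 'dst', and cutlass::NumericConverter from "cutlass/numeric_conversion.h"
// as the transformation functor):
//
//   cutlass::NumericConverter<cutlass::half_t, float> convert;
//   cutlass::reference::host::TensorCopy(dst.host_view(), src.host_view(), convert);
//
// Elements are transformed one at a time and written only at coordinates contained in both views.
//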
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Copies elements from a TensorRef into a TensorView. Assumes source tensor has sufficient extent
/// to avoid out of bounds accesses.
template <
typename DstElement, /// Destination tensor's element type
typename DstLayout, /// Destination tensor's layout
typename SrcElement, /// Source tensor's element type
typename SrcLayout, /// Source tensor's layout
typename F /// Transformation functor
>
void TensorCopy(
TensorView<DstElement, DstLayout> dst,
TensorRef<SrcElement, SrcLayout> src,
F const &transform) {
using CopyIf = detail::TensorCopyIf<
DstElement,
DstLayout,
SrcElement,
SrcLayout,
F>;
TensorView<SrcElement, SrcLayout> src_view(src, dst.extent());
CopyIf copy_if(dst, src_view, transform);
TensorForEach(dst.extent(), copy_if);
}
/// Copies elements from a TensorView into a TensorRef. Assumes the destination tensor has
/// sufficient extent to avoid out of bounds accesses.
template <
typename DstElement, /// Destination tensor's element type
typename DstLayout, /// Destination tensor's layout
typename SrcElement, /// Source tensor's element type
typename SrcLayout, /// Source tensor's layout
typename F /// Transformation functor
>
void TensorCopy(
TensorRef<DstElement, DstLayout> dst,
TensorView<SrcElement, SrcLayout> src,
F const &transform) {
using CopyIf = detail::TensorCopyIf<
DstElement,
DstLayout,
SrcElement,
SrcLayout,
F>;
TensorView<DstElement, DstLayout> dst_view(dst, src.extent());
CopyIf copy_if(dst_view, src, transform);
TensorForEach(src.extent(), copy_if);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Copies elements from one tensor view into another, satisfying bounds of each tensor. Succeeds
/// if SrcElement can be converted to DstElement.
template <
typename DstElement, /// Destination tensor's element type
typename DstLayout, /// Destination tensor's layout
typename SrcElement, /// Source tensor's element type
typename SrcLayout /// Source tensor's layout
>
void TensorCopy(
TensorView<DstElement, DstLayout> dst,
TensorView<SrcElement, SrcLayout> src) {
detail::TrivialConvert<DstElement, SrcElement> convert;
TensorCopy(dst, src, convert);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Copies elements from a TensorRef into a TensorView. Assumes the source tensor has sufficient
/// extent to avoid out of bounds accesses. Succeeds if SrcElement can be converted to DstElement.
template <
  typename DstElement,          /// Destination tensor's element type
  typename DstLayout,           /// Destination tensor's layout
  typename SrcElement,          /// Source tensor's element type
  typename SrcLayout            /// Source tensor's layout
>
void TensorCopy(
TensorView<DstElement, DstLayout> dst,
TensorRef<SrcElement, SrcLayout> src) {
detail::TrivialConvert<DstElement, SrcElement> convert;
TensorCopy(dst, src, convert);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Copies elements from a TensorView into a TensorRef. Assumes the destination tensor has
/// sufficient extent to avoid out of bounds accesses. Succeeds if SrcElement can be converted to
/// DstElement.
template <
typename DstElement, /// Destination tensor's element type
typename DstLayout, /// Destination tensor's layout
typename SrcElement, /// Source tensor's element type
typename SrcLayout /// Source tensor's layout
>
void TensorCopy(
TensorRef<DstElement, DstLayout> dst,
TensorView<SrcElement, SrcLayout> src) {
detail::TrivialConvert<DstElement, SrcElement> convert;
TensorCopy(dst, src, convert);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
| 8,317 | C | 31.365759 | 100 | 0.630396 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/host/tensor_reduce.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cmath>
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/util/reference/detail/linear_to_coordinate.h"
#include "cutlass/core_io.h"
namespace cutlass {
namespace reference {
namespace host {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Transform-reduce operation over the elements of a tensor, computed sequentially on the host
template <
typename Element,
typename Layout,
typename ComputeType,
typename ReduceOp,
typename TransformOp
>
ComputeType TensorTransformReduce(
TensorView<Element, Layout> view,
ComputeType identity,
ReduceOp reduce,
TransformOp transform
) {
for (int64_t idx = 0; idx < view.size(); ++idx) {
typename Layout::TensorCoord coord;
cutlass::reference::detail::LinearToCoordinate<Layout::kRank>()(coord, idx, view.extent());
if (view.contains(coord)) {
Element x = view.at(coord);
identity = reduce(identity, transform(x));
}
}
return identity;
}
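//
// Example: computing the maximum element of a view with the generic transform-reduce above. This is
// a hedged sketch; it assumes 'view' is a TensorView over cutlass::half_t data, and that
// "cutlass/functional.h", "cutlass/numeric_conversion.h", and <limits> are available.
//
//   cutlass::maximum<float> max_op;
//   cutlass::NumericConverter<float, cutlass::half_t> to_float;
//   float max_value = cutlass::reference::host::TensorTransformReduce(
//       view, std::numeric_limits<float>::lowest(), max_op, to_float);
//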
/// Transform-reduce operation over the elements of two tensors of equal extent, applying a binary
/// transform element-wise before reduction; computed sequentially on the host
template <
typename Element,
typename Layout,
typename ComputeType,
typename ReduceOp,
typename TransformOp
>
ComputeType TensorTransformReduce(
TensorView<Element, Layout> view_A,
TensorView<Element, Layout> view_B,
ComputeType identity,
ReduceOp reduce,
TransformOp transform) {
if (view_A.extent() != view_B.extent()) {
throw std::runtime_error("Tensor extents must match.");
}
for (int64_t idx = 0; idx < view_A.size(); ++idx) {
typename Layout::TensorCoord coord;
cutlass::reference::detail::LinearToCoordinate<Layout::kRank>()(coord, idx, view_A.extent());
if (view_A.contains(coord)) {
Element a = view_A.at(coord);
Element b = view_B.at(coord);
identity = reduce(identity, transform(a, b));
}
}
return identity;
}
/// Helper to compute the sum of the elements of a tensor
template <
typename Element,
typename Layout,
typename ComputeType = Element
>
ComputeType TensorSum(
TensorView<Element, Layout> view,
ComputeType identity = ComputeType()
) {
plus<ComputeType> reduce;
NumericConverter<ComputeType, Element> transform;
return TensorTransformReduce(
view, identity, reduce, transform);
}
/// Helper to compute the sum of the squares of the elements of a tensor
template <
typename Element,
typename Layout,
typename ComputeType = Element
>
ComputeType TensorSumSq(
TensorView<Element, Layout> view,
ComputeType identity = ComputeType()
) {
plus<ComputeType> reduce;
magnitude_squared<Element, ComputeType> transform;
return TensorTransformReduce(
view, identity, reduce, transform);
}
/// Helper to compute the norm of the elements of a tensor.
template <
typename Element,
typename Layout,
typename ComputeType = double
>
ComputeType TensorNorm(
TensorView<Element, Layout> view,
ComputeType identity = ComputeType()
) {
return std::sqrt(TensorSumSq(view, identity));
}
/// Helper to compute the sum of the squares of the differences of two tensors
template <
typename Element,
typename Layout,
typename ComputeType = double
>
ComputeType TensorSumSqDiff(
TensorView<Element, Layout> view_A,
TensorView<Element, Layout> view_B,
ComputeType identity = ComputeType()
) {
plus<ComputeType> reduce;
magnitude_squared_difference<Element, ComputeType> transform;
return TensorTransformReduce(
view_A, view_B, identity, reduce, transform);
}
/// Helper to compute the norm of the tensor computed as the difference of two tensors in memory
template <
typename Element,
typename Layout,
typename ComputeType = double
>
ComputeType TensorNormDiff(
TensorView<Element, Layout> view_A,
TensorView<Element, Layout> view_B,
ComputeType identity = ComputeType()
) {
return std::sqrt(TensorSumSqDiff(view_A, view_B, identity));
}
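//
// A common use of the helpers above is a relative-error check between a tensor under test and a
// reference tensor (a sketch; view_test and view_ref are assumed to be TensorViews of equal extent
// over non-zero reference data):
//
//   double relative_error =
//       cutlass::reference::host::TensorNormDiff(view_test, view_ref) /
//       cutlass::reference::host::TensorNorm(view_ref);
//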
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| 6,111 | C | 28.960784 | 100 | 0.678121 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/host/symm_complex.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for complex-valued SYMM update in host-side code.
*/
#pragma once
#include "cutlass/blas3.h"
#include "cutlass/complex.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
#include <assert.h>
namespace cutlass {
namespace reference {
namespace host {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a symmetric (SYMM) or Hermitian (HEMM) matrix-matrix update among matrices (tensors of
/// rank=2) pointed to by TensorRef objects: D = alpha * A * B + beta * C when SideModeA is kLeft,
/// or D = alpha * B * A + beta * C when SideModeA is kRight, where A is symmetric or Hermitian.
///
/// Explicitly naming types needed by this template can be cumbersome, particularly for the
/// accumulator type, so a function argument 'initial_accum' is exposed. Passing
/// AccumulatorType(0) as the last function argument can be easier than naming all template
/// arguments explicitly.
template <
typename ElementA,
typename LayoutA,
SideMode SideModeA,
FillMode FillModeA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
BlasMode BlasMode_ = BlasMode::kSymmetric,
typename InnerProductOp = multiply_add<ComputeType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_symm_complex(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum,
int batch_count = 1,
int64_t batch_stride_A = 0,
int64_t batch_stride_B = 0,
int64_t batch_stride_C = 0,
int64_t batch_stride_D = 0) {
static SideMode const kSideModeA = SideModeA;
static FillMode const kFillModeA = FillModeA;
static BlasMode const kBlasMode = BlasMode_;
static_assert(
LayoutA::kRank == 2 &&
LayoutB::kRank == 2 &&
LayoutC::kRank == 2, "Tensors must be of rank 2");
static_assert(kSideModeA != SideMode::kInvalid
, "Side Mode can either be Left or Right.");
static_assert(
kFillModeA == FillMode::kLower ||
kFillModeA == FillMode::kUpper,
"Fill Mode can either be Lower or Upper.");
using CompareOp_w_diag = typename TrMatrixCompareOp<kFillModeA, DiagType::kNonUnit>::Type;
using CompareOp_wo_diag = typename TrMatrixCompareOp<kFillModeA, DiagType::kZero>::Type;
  // Note: batches are processed sequentially, advancing each tensor by its batch stride.
int const M = problem_size.m();
int const N = problem_size.n();
// Assuming correct k-dimension value is passed
int const K = problem_size.k();
// Blocking necessary to speedup reference implementation
int const Mblock = 16;
int const Nblock = 16;
ConvertOp convert_op;
InnerProductOp inner_product_op;
CompareOp_w_diag compare_op_1;
CompareOp_wo_diag compare_op_2;
for (int batch_idx = 0; batch_idx < batch_count; ++batch_idx) {
// Compute matrix product using blocks
for (int row_block = 0; row_block < M; row_block += Mblock) {
for (int col_block = 0; col_block < N; col_block += Nblock) {
ComputeType accum[Mblock][Nblock];
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
accum[i][j] = initial_accum;
}
}
for (int k_block = 0; k_block < K; ++k_block) {
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
if (row < M && col < N)
{
ElementA a_1 = ElementA();
ElementB b_1 = ElementB();
ElementA a_2 = ElementA();
ElementB b_2 = ElementB();
// A x B or B x A (with diagonal)
if (kSideModeA == SideMode::kLeft) {
a_1 = (compare_op_1(row, k_block)) ?
(tensor_a.at(MatrixCoord(row, k_block))) : ElementA();
b_1 = tensor_b.at(MatrixCoord(k_block, col));
} else if (kSideModeA == SideMode::kRight) {
a_1 = tensor_b.at(MatrixCoord(row, k_block));
b_1 = (compare_op_1(k_block, col)) ?
tensor_a.at(MatrixCoord(k_block, col)) : ElementA();
}
ComputeType compute_a_1 = ComputeType(a_1);
ComputeType compute_b_1 = ComputeType(b_1);
              // For Hermitian (HEMM) inputs, the imaginary parts of the diagonal
              // elements are assumed to be zero; only their real parts are used
if (kBlasMode == BlasMode::kHermitian && kSideModeA == SideMode::kLeft && row == k_block) {
compute_a_1 = real(compute_a_1);
} else if (kBlasMode == BlasMode::kHermitian && kSideModeA == SideMode::kRight && k_block == col) {
compute_b_1 = real(compute_b_1);
}
accum[i][j] = inner_product_op(compute_a_1, compute_b_1, accum[i][j]);
// A^T x B or B x A^T (without diagonal)
if (kSideModeA == SideMode::kLeft) {
a_2 = (compare_op_2(k_block, row)) ?
(tensor_a.at(MatrixCoord(k_block, row))) : ElementA();
b_2 = tensor_b.at(MatrixCoord(k_block, col));
if (kBlasMode == BlasMode::kHermitian)
a_2 = conj(a_2);
} else if (kSideModeA == SideMode::kRight) {
a_2 = tensor_b.at(MatrixCoord(row, k_block));
b_2 = (compare_op_2(col, k_block)) ?
tensor_a.at(MatrixCoord(col, k_block)) : ElementA();
if (kBlasMode == BlasMode::kHermitian)
b_2 = conj(b_2);
}
ComputeType compute_a_2 = ComputeType(a_2);
ComputeType compute_b_2 = ComputeType(b_2);
accum[i][j] = inner_product_op(compute_a_2, compute_b_2, accum[i][j]);
}
}
}
}
for (int j = 0; j < Nblock; j++) {
for (int i = 0; i < Mblock; i++) {
int row = row_block + i;
int col = col_block + j;
MatrixCoord coord = MatrixCoord(row, col);
if (row < M && col < N) {
ScalarType c = tensor_c.at(coord);
tensor_d.at(coord) = convert_op(
alpha * ScalarType(accum[i][j]) +
beta * c);
}
}
}
} // for (col_block)
} // for (row_block)
tensor_a.add_pointer_offset(batch_stride_A);
tensor_b.add_pointer_offset(batch_stride_B);
tensor_c.add_pointer_offset(batch_stride_C);
tensor_d.add_pointer_offset(batch_stride_D);
} // for (batch_idx)
}
////////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename ElementA,
typename LayoutA,
SideMode SideModeA,
FillMode FillModeA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename ComputeType,
BlasMode BlasMode_ = cutlass::BlasMode::kSymmetric,
typename InnerProductOp = cutlass::arch::OpMultiplyAddComplex
>
struct SymmComplex;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex multiply-add (arch::OpMultiplyAddComplex)
template <typename ElementA, typename LayoutA,
SideMode SideModeA, FillMode FillModeA,
typename ElementB, typename LayoutB,
typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType,
BlasMode BlasMode_>
struct SymmComplex<ElementA, LayoutA,
SideModeA, FillModeA,
ElementB, LayoutB,
ElementC, LayoutC, ScalarType,
ComputeType, BlasMode_,
arch::OpMultiplyAddComplex> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_symm_complex<ElementA, LayoutA,
SideModeA, FillModeA,
ElementB, LayoutB,
ElementC, LayoutC,
ScalarType, ComputeType, BlasMode_, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
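//
// Usage sketch for the specialization above (hedged; the element types, layouts, operands, and the
// problem shape are illustrative assumptions, with A, B, C, D assumed to be cutlass::HostTensor
// objects). With SideMode::kLeft and FillMode::kLower, A is an M-by-M symmetric matrix referenced
// through its lower triangle, so the GemmCoord k-dimension equals M:
//
//   using ReferenceSymm = cutlass::reference::host::SymmComplex<
//       cutlass::complex<float>, cutlass::layout::ColumnMajor,   // A
//       cutlass::SideMode::kLeft, cutlass::FillMode::kLower,
//       cutlass::complex<float>, cutlass::layout::ColumnMajor,   // B
//       cutlass::complex<float>, cutlass::layout::ColumnMajor,   // C / D
//       cutlass::complex<float>,                                 // ScalarType
//       cutlass::complex<float>>;                                // ComputeType
//
//   ReferenceSymm reference_symm;
//   reference_symm(cutlass::gemm::GemmCoord(M, N, M), alpha,
//                  A.host_ref(), B.host_ref(), beta, C.host_ref(), D.host_ref());
//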
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for gaussian multiply-add
template <typename ElementA, typename LayoutA,
SideMode SideModeA, FillMode FillModeA,
typename ElementB, typename LayoutB,
typename ElementC, typename LayoutC,
typename ScalarType, typename ComputeType,
BlasMode BlasMode_>
struct SymmComplex<ElementA, LayoutA,
SideModeA, FillModeA,
ElementB, LayoutB,
ElementC, LayoutC, ScalarType,
ComputeType, BlasMode_,
arch::OpMultiplyAddGaussianComplex> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
ComputeType initial_accum = ComputeType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_symm_complex<ElementA, LayoutA,
SideModeA, FillModeA,
ElementB, LayoutB,
ElementC, LayoutC,
ScalarType, ComputeType, BlasMode_, multiply_add<ComputeType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
| 12,296 | C | 37.428125 | 115 | 0.573276 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/host/convolution.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for convolution in host-side code.
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/functional.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/tensor_view.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
#include <iostream>
namespace cutlass {
namespace reference {
namespace host {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Forward propagation
////////////////////////////////////////////////////////////////////////////////////////////////////
/// y = conv2d(x, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv2dFprop(
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_x,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_y_in,
TensorRef<ElementC, LayoutC> tensor_y_out,
ElementCompute alpha,
ElementCompute beta) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
// Apply MMA and accumulate ElementAccumulator
for (int n = 0; n < problem_size.N; ++n) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
for (int k = 0; k < problem_size.K; ++k) {
int group_idx = k / (problem_size.K / problem_size.groups);
int channels_per_group = problem_size.C / problem_size.groups;
ElementAccumulator acc = ElementAccumulator();
for (int r = 0; r < problem_size.R; ++r) {
for (int s = 0; s < problem_size.S; ++s) {
for (int c = 0; c < channels_per_group; ++c) {
int filter_r = r;
int filter_s = s;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_r = problem_size.R - 1 - r;
filter_s = problem_size.S - 1 - s;
}
int h = p * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h;
int w = q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w;
if (h >= 0 && h < problem_size.H && w >= 0 && w < problem_size.W) {
ElementA a = tensor_x.at({n, h, w, c + group_idx * channels_per_group});
ElementB b = tensor_w.at({k, r, s, c});
acc = inner_product_op(ElementAccumulator(a), ElementAccumulator(b), acc);
}
}
}
}
// Apply Epilogue, compute ElementCompute, convert and store ElementC
ElementC c_ref = ElementC();
if (beta != ElementCompute()) {
c_ref = tensor_y_in.at(cutlass::make_Coord(n, p, q, k));
}
tensor_y_out.at(cutlass::make_Coord(n, p, q, k)) =
convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref));
}
}
}
}
}
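//
// Usage sketch for the forward-propagation reference above. Everything below is an illustrative
// assumption (float tensors, NHWC layouts, a 3x3 filter, and cutlass::HostTensor from
// "cutlass/util/host_tensor.h" for storage); the function itself only requires TensorRefs with the
// appropriate layouts:
//
//   cutlass::conv::Conv2dProblemSize problem_size(
//       {1, 32, 32, 16},   // input size  (N, H, W, C)
//       {16, 3, 3, 16},    // filter size (K, R, S, C)
//       {1, 1, 1, 1},      // padding
//       {1, 1},            // stride
//       {1, 1});           // dilation
//
//   cutlass::HostTensor<float, cutlass::layout::TensorNHWC> x(problem_size.activation_extent());
//   cutlass::HostTensor<float, cutlass::layout::TensorNHWC> w(problem_size.filter_extent());
//   cutlass::HostTensor<float, cutlass::layout::TensorNHWC> y(problem_size.output_extent());
//
//   // y = conv2d(x, w) with alpha = 1 and beta = 0 (tensor_y_in is unused when beta == 0)
//   cutlass::reference::host::Conv2dFprop(
//       problem_size, x.host_ref(), w.host_ref(), y.host_ref(), y.host_ref(), 1.0f, 0.0f);
//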
/// Depthwise-separable convolution
template <typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator> >
void Depsep_Fprop(cutlass::TensorView<ElementA, LayoutA> tensor_A,
cutlass::TensorView<ElementB, LayoutB> tensor_B,
cutlass::TensorView<ElementC, LayoutC> tensor_C,
cutlass::TensorView<ElementC, LayoutC> tensor_D,
ElementCompute alpha,
ElementCompute beta,
cutlass::Tensor4DCoord padding = cutlass::Tensor4DCoord(),
cutlass::Coord<2> conv_stride = cutlass::Coord<2>(),
cutlass::Coord<2> dilation = cutlass::Coord<2>(),
cutlass::conv::Mode mode = cutlass::conv::Mode::kCrossCorrelation) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
// Apply MMA and accumulate ElementAccumulator
for (int n = 0; n < tensor_C.extent().n(); ++n) {
for (int p = 0; p < tensor_C.extent().h(); ++p) {
for (int q = 0; q < tensor_C.extent().w(); ++q) {
for (int g = 0; g < tensor_C.extent().c(); ++g) {
ElementAccumulator acc = ElementAccumulator();
for (int r = 0; r < tensor_B.extent().h(); ++r) {
for (int s = 0; s < tensor_B.extent().w(); ++s) {
// input activation H and W
int h = p * conv_stride[0] - padding[0] + r * dilation[0];
int w = q * conv_stride[1] - padding[2] + s * dilation[1];
if (h < tensor_A.extent().h() && h >= 0 && w < tensor_A.extent().w() && w >= 0) {
ElementA a = tensor_A.at(cutlass::make_Coord(n, h, w, g));
ElementB b = (mode == cutlass::conv::Mode::kCrossCorrelation)
? tensor_B.at(cutlass::make_Coord(g, r, s, 0))
: tensor_B.at(cutlass::make_Coord(
g, tensor_B.extent().h() - r - 1, tensor_B.extent().w() - s - 1, 0));
acc = inner_product_op(ElementAccumulator(a), ElementAccumulator(b), acc);
}
}
}
// Apply Epilogue, compute ElementCompute, convert and store ElementC
ElementC c_ref = tensor_C.at(cutlass::make_Coord(n, p, q, g));
tensor_D.at(cutlass::make_Coord(n, p, q, g)) =
convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref));
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Dgrad
////////////////////////////////////////////////////////////////////////////////////////////////////
/// dx = dgrad(dy, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv2dDgrad(
cutlass::conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_dx_in,
TensorRef<ElementC, LayoutC> tensor_dx_out,
ElementCompute alpha,
ElementCompute beta) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
// Apply MMA and accumulate ElementAccumulator
for (int n = 0; n < problem_size.N; ++n) {
for (int h = 0; h < problem_size.H; ++h) {
for (int w = 0; w < problem_size.W; ++w) {
for (int c = 0; c < problem_size.C; ++c) {
ElementAccumulator acc = ElementAccumulator();
for (int r = 0; r < problem_size.R; ++r) {
for (int s = 0; s < problem_size.S; ++s) {
for (int k = 0; k < problem_size.K; ++k) {
int filter_r = r;
int filter_s = s;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_r = problem_size.R - 1 - r;
filter_s = problem_size.S - 1 - s;
}
int p = h + problem_size.pad_h - filter_r * problem_size.dilation_h;
int q = w + problem_size.pad_w - filter_s * problem_size.dilation_w;
if (p >= 0 && (p % problem_size.stride_h) == 0 &&
q >= 0 && (q % problem_size.stride_w) == 0) {
p = p / problem_size.stride_h;
q = q / problem_size.stride_w;
#if 0
std::cout << "row:"
<< n * problem_size.H * problem_size.W +
h * problem_size.W +
w << " "
<< "n, p, q: ("
<< n << ", "
<< p << ", "
<< q << ") * "
<< "r, s: ("
<< r << ", "
<< s << ") ["
<< ((p < problem_size.P && q < problem_size.Q) ? "true":"false") << "]"
<< std::endl;
#endif
if (p < problem_size.P && q < problem_size.Q) {
ElementA a = tensor_dy.at(cutlass::make_Coord(n, p, q, k));
ElementB b = tensor_w.at(cutlass::make_Coord(k, r, s, c));
acc = inner_product_op(ElementAccumulator(a), ElementAccumulator(b), acc);
}
}
} // for (K)
} // for (S)
} // for (R)
// Apply Epilogue, compute ElementCompute, convert and store ElementC
ElementC c_ref = ElementC();
if (beta != ElementCompute()) {
c_ref = tensor_dx_in.at(cutlass::make_Coord(n, h, w, c));
}
tensor_dx_out.at(cutlass::make_Coord(n, h, w, c)) =
convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref));
} // for (C)
} // for (W)
} // for (H)
} // for (N)
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Wgrad
////////////////////////////////////////////////////////////////////////////////////////////////////
/// dw = wgrad(dy, x)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv2dWgrad(
cutlass::conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_x,
TensorRef<ElementC, LayoutC> tensor_dw_in,
TensorRef<ElementC, LayoutC> tensor_dw_out,
ElementCompute alpha,
ElementCompute beta) {
InnerProductOp inner_product_op;
ConvertOp convert_op;
// Apply MMA and accumulate ElementAccumulator
for (int k = 0; k < problem_size.K; ++k) {
for (int r = 0; r < problem_size.R; ++r) {
for (int s = 0; s < problem_size.S; ++s) {
for (int c = 0; c < problem_size.C; ++c) {
ElementAccumulator acc = ElementAccumulator();
for (int n = 0; n < problem_size.N; ++n) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
cutlass::Tensor4DCoord b_coord;
int filter_r = r;
int filter_s = s;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_r = problem_size.R - 1 - r;
filter_s = problem_size.S - 1 - s;
}
b_coord = make_Coord(
n,
p * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h,
q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w,
c);
if (b_coord.h() < problem_size.H && b_coord.h() >= 0 &&
b_coord.w() < problem_size.W && b_coord.w() >= 0) {
ElementAccumulator a = ElementAccumulator(tensor_dy.at(cutlass::make_Coord(n, p, q, k)));
ElementAccumulator b = ElementAccumulator(tensor_x.at(b_coord));
acc = inner_product_op(a, b, acc);
}
}
}
}
// Apply Epilogue, compute ElementCompute, convert and store ElementC
ElementC c_ref = ElementC();
if (beta != ElementCompute()) {
c_ref = tensor_dw_in.at(cutlass::make_Coord(k, r, s, c));
}
tensor_dw_out.at(cutlass::make_Coord(k, r, s, c)) =
convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref));
} // for (C)
} // for (S)
} // for (R)
} // for (K)
}
/// Generic 2D convolution targeting Conv2dFprop, Conv2dDgrad, and Conv2dWgrad.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv2d(
conv::Operator convolutional_operator,
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_A,
TensorRef<ElementB, LayoutB> tensor_B,
TensorRef<ElementC, LayoutC> tensor_C,
TensorRef<ElementC, LayoutC> tensor_D,
ElementCompute alpha,
ElementCompute beta) {
switch (convolutional_operator) {
case conv::Operator::kFprop:
Conv2dFprop<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta);
break;
case conv::Operator::kDgrad:
Conv2dDgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta);
break;
case conv::Operator::kWgrad:
Conv2dWgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta);
break;
default:
break;
}
}
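//
// The dispatcher above lets a test harness select the pass at runtime. Continuing the hedged
// Conv2dFprop sketch earlier in this header (same assumed tensors and problem size):
//
//   cutlass::reference::host::Conv2d(
//       cutlass::conv::Operator::kFprop,
//       problem_size, x.host_ref(), w.host_ref(), y.host_ref(), y.host_ref(), 1.0f, 0.0f);
//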
////////////////////////////////////////////////////////////////////////////////////////////////////
/// 3D convolution
////////////////////////////////////////////////////////////////////////////////////////////////////
/// y = conv3d(x, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv3dFprop(
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_x,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_y_in,
TensorRef<ElementC, LayoutC> tensor_y_out,
ElementCompute alpha,
ElementCompute beta) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
// Apply MMA and accumulate ElementAccumulator
for (int n = 0; n < problem_size.N; ++n) {
for (int z = 0; z < problem_size.Z; ++z) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
for (int k = 0; k < problem_size.K; ++k) {
ElementAccumulator acc = ElementAccumulator();
for (int t = 0; t < problem_size.T; ++t) {
for (int r = 0; r < problem_size.R; ++r) {
for (int s = 0; s < problem_size.S; ++s) {
for (int c = 0; c < problem_size.C; ++c) {
int filter_t = t;
int filter_r = r;
int filter_s = s;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_t = problem_size.T - 1 - t;
filter_r = problem_size.R - 1 - r;
filter_s = problem_size.S - 1 - s;
}
int d = z * problem_size.stride_d - problem_size.pad_d + filter_t * problem_size.dilation_d;
int h = p * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h;
int w = q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w;
if (d >= 0 && d < problem_size.D &&
h >=0 && h < problem_size.H &&
w >= 0 && w < problem_size.W) {
ElementA a = tensor_x.at({n, d, h, w, c});
ElementB b = tensor_w.at({k, t, r, s, c});
acc = inner_product_op(ElementAccumulator(a), ElementAccumulator(b), acc);
}
}
}
}
}
// Apply Epilogue, compute ElementCompute, convert and store ElementC
ElementC c_ref = ElementC();
if (beta != ElementCompute()) {
c_ref = tensor_y_in.at(cutlass::make_Coord(n, z, p, q, k));
}
tensor_y_out.at(cutlass::make_Coord(n, z, p, q, k)) =
convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref));
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Dgrad
////////////////////////////////////////////////////////////////////////////////////////////////////
/// dx = dgrad(dy, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv3dDgrad(
cutlass::conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_dx_in,
TensorRef<ElementC, LayoutC> tensor_dx_out,
ElementCompute alpha,
ElementCompute beta) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
// Apply MMA and accumulate ElementAccumulator
for (int n = 0; n < problem_size.N; ++n) {
for (int d = 0; d < problem_size.D; ++d) {
for (int h = 0; h < problem_size.H; ++h) {
for (int w = 0; w < problem_size.W; ++w) {
for (int c = 0; c < problem_size.C; ++c) {
ElementAccumulator acc = ElementAccumulator();
for (int t = 0; t < problem_size.T; ++t) {
for (int r = 0; r < problem_size.R; ++r) {
for (int s = 0; s < problem_size.S; ++s) {
for (int k = 0; k < problem_size.K; ++k) {
int filter_t = t;
int filter_r = r;
int filter_s = s;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_t = problem_size.T - 1 - t;
filter_r = problem_size.R - 1 - r;
filter_s = problem_size.S - 1 - s;
}
int z = d + problem_size.pad_d - filter_t * problem_size.dilation_d;
int p = h + problem_size.pad_h - filter_r * problem_size.dilation_h;
int q = w + problem_size.pad_w - filter_s * problem_size.dilation_w;
if (z >= 0 && (z % problem_size.stride_d) == 0 &&
p >= 0 && (p % problem_size.stride_h) == 0 &&
q >= 0 && (q % problem_size.stride_w) == 0) {
z = z / problem_size.stride_d;
p = p / problem_size.stride_h;
q = q / problem_size.stride_w;
if (z < problem_size.Z && p < problem_size.P && q < problem_size.Q) {
ElementA a = tensor_dy.at(cutlass::make_Coord(n, z, p, q, k));
ElementB b = tensor_w.at(cutlass::make_Coord(k, t, r, s, c));
acc = inner_product_op(ElementAccumulator(a), ElementAccumulator(b), acc);
}
}
} // for (K)
} // for (S)
} // for (R)
} // for (T)
// Apply Epilogue, compute ElementCompute, convert and store ElementC
ElementC c_ref = ElementC();
if (beta != ElementCompute()) {
c_ref = tensor_dx_in.at(cutlass::make_Coord(n, d, h, w, c));
}
tensor_dx_out.at(cutlass::make_Coord(n, d, h, w, c)) =
convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref));
} // for (C)
} // for (W)
} // for (H)
} // for (D)
} // for (N)
}
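// Worked example (illustrative): with stride_d = 2, pad_d = 1, dilation_d = 1, the
// input-gradient location d = 3 and filter tap t = 0 give z = 3 + 1 - 0 = 4, which is
// divisible by the stride, so output location z = 4 / 2 = 2 contributes; the tap t = 1
// gives z = 3, which is not divisible by the stride and is therefore skipped.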
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Wgrad
////////////////////////////////////////////////////////////////////////////////////////////////////
/// dw = wgrad(dy, x)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv3dWgrad(
cutlass::conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_x,
TensorRef<ElementC, LayoutC> tensor_dw_in,
TensorRef<ElementC, LayoutC> tensor_dw_out,
ElementCompute alpha,
ElementCompute beta) {
InnerProductOp inner_product_op;
ConvertOp convert_op;
// Apply MMA and accumulate ElementAccumulator
for (int k = 0; k < problem_size.K; ++k) {
for (int t = 0; t < problem_size.T; ++t) {
for (int r = 0; r < problem_size.R; ++r) {
for (int s = 0; s < problem_size.S; ++s) {
for (int c = 0; c < problem_size.C; ++c) {
ElementAccumulator acc = ElementAccumulator();
for (int n = 0; n < problem_size.N; ++n) {
for (int z = 0; z < problem_size.Z; ++z) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
int filter_t = t;
int filter_r = r;
int filter_s = s;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_t = problem_size.T - 1 - t;
filter_r = problem_size.R - 1 - r;
filter_s = problem_size.S - 1 - s;
}
Tensor5DCoord b_coord = make_Coord(
n,
z * problem_size.stride_d - problem_size.pad_d + filter_t * problem_size.dilation_d,
p * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h,
q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w,
c);
if (b_coord.d() < problem_size.D && b_coord.d() >= 0 &&
b_coord.h() < problem_size.H && b_coord.h() >= 0 &&
b_coord.w() < problem_size.W && b_coord.w() >= 0) {
ElementAccumulator a = ElementAccumulator(tensor_dy.at(cutlass::make_Coord(n, z, p, q, k)));
ElementAccumulator b = ElementAccumulator(tensor_x.at(b_coord));
acc = inner_product_op(a, b, acc);
}
}
}
}
}
// Apply Epilogue, compute ElementCompute, convert and store ElementC
ElementC c_ref = ElementC();
if (beta != ElementCompute()) {
c_ref = tensor_dw_in.at(cutlass::make_Coord(k, t, r, s, c));
}
tensor_dw_out.at(cutlass::make_Coord(k, t, r, s, c)) =
convert_op(alpha * ElementCompute(acc) + beta * ElementCompute(c_ref));
} // for (C)
} // for (S)
} // for (R)
} // for (T)
} // for (K)
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Generic 3D convolution targeting Conv3dFprop, Conv3dDgrad, and Conv3dWgrad.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
void Conv3d(
conv::Operator convolutional_operator,
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_A,
TensorRef<ElementB, LayoutB> tensor_B,
TensorRef<ElementC, LayoutC> tensor_C,
TensorRef<ElementC, LayoutC> tensor_D,
ElementCompute alpha,
ElementCompute beta) {
switch (convolutional_operator) {
case conv::Operator::kFprop:
Conv3dFprop<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta);
break;
case conv::Operator::kDgrad:
Conv3dDgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta);
break;
case conv::Operator::kWgrad:
Conv3dWgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta);
break;
default:
break;
}
}
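// Usage sketch (illustrative only; `problem` is a populated cutlass::conv::Conv3dProblemSize
// and the tensors are assumed to be caller-allocated cutlass::HostTensor objects; none of
// these names are defined in this header):
//
//   cutlass::reference::host::Conv3d<
//       cutlass::half_t, cutlass::layout::TensorNDHWC,
//       cutlass::half_t, cutlass::layout::TensorNDHWC,
//       float, cutlass::layout::TensorNDHWC,
//       float>(
//     cutlass::conv::Operator::kFprop,
//     problem,
//     tensor_x.host_ref(),
//     tensor_w.host_ref(),
//     tensor_y.host_ref(),   // source accumulator (C)
//     tensor_y.host_ref(),   // destination (D)
//     1.0f,                  // alpha
//     0.0f);                 // beta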
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace host
} // namespace reference
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 28,439 | C | 35 | 114 | 0.526038 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/device/tensor_foreach.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <stdexcept>
#include "cutlass/cutlass.h"
#include "cutlass/util/reference/device/kernel/tensor_foreach.h"
namespace cutlass {
namespace reference {
namespace device {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Launches a kernel calling a functor for each element in a tensor's index space.
template <typename Func, int Rank, typename Params>
struct TensorForEach {
/// Constructor performs the operation.
TensorForEach(Coord<Rank> size, Params params = Params(), int grid_size = 0, int block_size = 0) {
if (!grid_size || !block_size) {
// if grid_size or block_size are zero, query occupancy using the CUDA Occupancy API
cudaError_t result = cudaOccupancyMaxPotentialBlockSize(
&grid_size,
&block_size,
reinterpret_cast<void const *>(kernel::TensorForEach<Func, Rank, Params>));
if (result != cudaSuccess) {
throw std::runtime_error("Failed to query occupancy.");
}
// Limit block size. This has the effect of increasing the number of items processed by a
// single thread and reduces the impact of initialization overhead.
block_size = (block_size < 128 ? block_size : 128);
}
dim3 grid(grid_size, 1, 1);
dim3 block(block_size, 1, 1);
kernel::TensorForEach<Func, Rank, Params><<< grid, block >>>(size, params);
}
};
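// Usage sketch (illustrative only; MyScaleFunc is a hypothetical functor, not part of
// CUTLASS). Func is expected to provide a nested Params type, a constructor taking Params,
// and a device-side operator()(Coord<Rank>), mirroring the fill/compare functors that use
// this launcher:
//
//   struct MyScaleFunc {
//     struct Params {
//       cutlass::TensorView<float, cutlass::layout::RowMajor> view;
//       float scale;
//     };
//     Params params;
//     CUTLASS_DEVICE MyScaleFunc(Params const &p): params(p) { }
//     CUTLASS_DEVICE void operator()(cutlass::Coord<2> const &coord) {
//       params.view.at(coord) *= params.scale;   // scale each element in place
//     }
//   };
//
//   // Scales every element of a rank-2, device-backed view by 2.
//   cutlass::reference::device::TensorForEach<MyScaleFunc, 2, MyScaleFunc::Params>(
//     view.extent(), MyScaleFunc::Params{view, 2.0f});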
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Launches a kernel calling a functor for each element along a tensor's diagonal
template <typename Func, int Rank, typename Params>
struct TensorDiagonalForEach {
/// Constructor performs the operation
TensorDiagonalForEach(Coord<Rank> size, Params params = Params(), int start = 0, int end = -1, int block_size = 128) {
if (end < 0) {
end = size.min();
}
dim3 block(block_size, 1, 1);
dim3 grid((end - start + block_size - 1) / block_size, 1, 1);
kernel::TensorDiagonalForEach<Func, Rank, Params><<< grid, block >>>(size, params, start, end);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element, typename Func>
struct BlockForEach {
/// Constructor performs the operation.
BlockForEach(
Element *ptr,
size_t capacity,
typename Func::Params params = typename Func::Params(),
int grid_size = 0,
int block_size = 0) {
if (!grid_size || !block_size) {
// if grid_size or block_size are zero, query occupancy using the CUDA Occupancy API
cudaError_t result = cudaOccupancyMaxPotentialBlockSize(
&grid_size,
&block_size,
reinterpret_cast<void const *>(kernel::BlockForEach<Element, Func>));
if (result != cudaSuccess) {
throw std::runtime_error("Failed to query occupancy.");
}
// Limit block size. This has the effect of increasing the number of items processed by a
// single thread and reduces the impact of initialization overhead.
block_size = (block_size < 128 ? block_size : 128);
}
dim3 grid(grid_size, 1, 1);
dim3 block(block_size, 1, 1);
kernel::BlockForEach<Element, Func><<< grid, block >>>(ptr, capacity, params);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reference
} // namespace cutlass
| 5,293 | C | 37.642335 | 121 | 0.622331 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/device/tensor_compare.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines device-side elementwise operations on TensorView.
*/
#pragma once
// Standard Library includes
#include <utility>
// Cutlass includes
#include "cutlass/cutlass.h"
#include "cutlass/relatively_equal.h"
#include "cutlass/util/distribution.h"
#include "tensor_foreach.h"
namespace cutlass {
namespace reference {
namespace device {
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace kernel {
template <typename Element>
__global__ void BlockCompareEqual(
int *equal,
Element const *ptr_A,
Element const *ptr_B,
size_t capacity) {
size_t idx = threadIdx.x + blockDim.x * blockIdx.x;
for (; idx < capacity; idx += gridDim.x * blockDim.x) {
Element a = cutlass::ReferenceFactory<Element>::get(ptr_A, idx);
Element b = cutlass::ReferenceFactory<Element>::get(ptr_B, idx);
if (a != b) {
*equal = 0;
return;
}
}
}
template <typename Element>
__global__ void BlockCompareRelativelyEqual(
int *equal,
Element const *ptr_A,
Element const *ptr_B,
size_t capacity,
Element epsilon,
Element nonzero_floor) {
size_t idx = threadIdx.x + blockDim.x * blockIdx.x;
for (; idx < capacity; idx += gridDim.x * blockDim.x) {
Element a = cutlass::ReferenceFactory<Element>::get(ptr_A, idx);
Element b = cutlass::ReferenceFactory<Element>::get(ptr_B, idx);
if (!relatively_equal(a, b, epsilon, nonzero_floor)) {
*equal = 0;
return;
}
}
}
} // namespace kernel
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Performs a bit-level equality check between two blocks
template <typename Element>
bool BlockCompareEqual(
Element const *ptr_A,
Element const *ptr_B,
size_t capacity,
int grid_size = 0,
int block_size = 0) {
int equal_flag = 1;
int *device_equal_flag = nullptr;
if (cudaMalloc((void **)&device_equal_flag, sizeof(int)) != cudaSuccess) {
throw std::runtime_error("Failed to allocate device flag.");
}
if (cudaMemcpy(
device_equal_flag,
&equal_flag,
sizeof(int),
cudaMemcpyHostToDevice) != cudaSuccess) {
throw std::runtime_error("Failed to copy equality flag to device.");
}
if (!grid_size || !block_size) {
// if grid_size or block_size are zero, query occupancy using the CUDA Occupancy API
cudaError_t result = cudaOccupancyMaxPotentialBlockSize(
&grid_size,
&block_size,
reinterpret_cast<void const *>(kernel::BlockCompareEqual<Element>));
if (result != cudaSuccess) {
throw std::runtime_error("Failed to query occupancy.");
}
// Limit block size. This has the effect of increasing the number of items processed by a
// single thread and reduces the impact of initialization overhead.
block_size = (block_size < 128 ? block_size : 128);
}
dim3 grid(grid_size, 1, 1);
dim3 block(block_size, 1, 1);
kernel::BlockCompareEqual<Element><<< grid, block >>>(device_equal_flag, ptr_A, ptr_B, capacity);
if (cudaMemcpy(
&equal_flag,
device_equal_flag,
sizeof(int),
cudaMemcpyDeviceToHost) != cudaSuccess) {
cudaFree(device_equal_flag);
throw std::runtime_error("Failed to copy equality flag from device.");
}
cudaFree(device_equal_flag);
return equal_flag;
}
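// Usage sketch (illustrative only; `d_result` and `d_reference` are assumed to be device
// pointers, each holding `n` elements):
//
//   bool passed = cutlass::reference::device::BlockCompareEqual<float>(d_result, d_reference, n);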
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Performs a relative equality check between two blocks
template <typename Element>
bool BlockCompareRelativelyEqual(
Element const *ptr_A,
Element const *ptr_B,
size_t capacity,
Element epsilon,
Element nonzero_floor,
int grid_size = 0,
int block_size = 0) {
int equal_flag = 1;
int *device_equal_flag = nullptr;
if (cudaMalloc((void **)&device_equal_flag, sizeof(int)) != cudaSuccess) {
throw std::runtime_error("Failed to allocate device flag.");
}
if (cudaMemcpy(
device_equal_flag,
&equal_flag,
sizeof(int),
cudaMemcpyHostToDevice) != cudaSuccess) {
throw std::runtime_error("Failed to copy equality flag to device.");
}
if (!grid_size || !block_size) {
// if grid_size or block_size are zero, query occupancy using the CUDA Occupancy API
cudaError_t result = cudaOccupancyMaxPotentialBlockSize(
&grid_size,
&block_size,
reinterpret_cast<void const *>(kernel::BlockCompareRelativelyEqual<Element>));
if (result != cudaSuccess) {
throw std::runtime_error("Failed to query occupancy.");
}
// Limit block size. This has the effect of increasing the number of items processed by a
// single thread and reduces the impact of initialization overhead.
block_size = (block_size < 128 ? block_size : 128);
}
dim3 grid(grid_size, 1, 1);
dim3 block(block_size, 1, 1);
kernel::BlockCompareRelativelyEqual<Element><<< grid, block >>>(
device_equal_flag,
ptr_A,
ptr_B,
capacity,
epsilon,
nonzero_floor
);
if (cudaMemcpy(
&equal_flag,
device_equal_flag,
sizeof(int),
cudaMemcpyDeviceToHost) != cudaSuccess) {
cudaFree(device_equal_flag);
throw std::runtime_error("Failed to copy equality flag from device.");
}
cudaFree(device_equal_flag);
return equal_flag;
}
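// Usage sketch (illustrative only; the epsilon and nonzero floor below are arbitrary
// tolerances chosen for the example, not library defaults):
//
//   bool passed = cutlass::reference::device::BlockCompareRelativelyEqual<float>(
//     d_result, d_reference, n, /*epsilon=*/1e-5f, /*nonzero_floor=*/1e-7f);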
///////////////////////////////////////////////////////////////////////////////////////////////////
} // device
} // reference
} // cutlass
| 7,278 | C | 28.469636 | 100 | 0.634515 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/device/tensor_fill.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines device-side elementwise operations on TensorView. Note, the operations defined
in this header are not specialized for any particular data layout and are therefore not
intended to offer the best possible performance. Rather, they are intended to be generic
reference implementations to support the CUTLASS unit tests.
*/
#pragma once
#if !defined(__CUDACC_RTC__)
// Standard Library includes
#include <utility>
#include <cstdlib>
#include <cmath>
#include <type_traits>
#include <cstdint>
#endif
// CUDA includes
#include <curand_kernel.h>
// Cutlass includes
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/complex.h"
#include "cutlass/tensor_view.h"
#include "cutlass/blas3.h"
#include "cutlass/util/reference/device/tensor_foreach.h"
#include "cutlass/util/distribution.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reference {
namespace device {
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <typename FloatType>
CUTLASS_DEVICE
FloatType random_normal_float(curandState_t *state) {
return curand_normal(state);
}
template <>
CUTLASS_DEVICE
double random_normal_float<double>(curandState_t *state) {
return curand_normal_double(state);
}
template <typename FloatType>
CUTLASS_DEVICE
FloatType random_uniform_float(curandState_t *state) {
return curand_uniform(state);
}
template <>
CUTLASS_DEVICE
double random_uniform_float<double>(curandState_t *state) {
return curand_uniform_double(state);
}
template <typename Element>
struct RandomGaussianFunc {
using FloatType = typename std::conditional<(sizeof(Element) > 4), double, float>::type;
using IntType = typename std::conditional<(sizeof(Element) > 4), int64_t, int>::type;
/// Parameters structure
struct Params {
//
// Data members
//
uint64_t seed;
FloatType mean;
FloatType stddev;
int int_scale;
FloatType float_scale_up;
FloatType float_scale_down;
//
// Methods
//
/// Construction of Gaussian RNG functor.
Params(
uint64_t seed_ = 0,
Element mean_ = 0,
Element stddev_ = 1,
int int_scale_ = -1
):
seed(seed_),
mean(static_cast<FloatType>(mean_)),
stddev(static_cast<FloatType>(stddev_)),
int_scale(int_scale_) {
float_scale_up = FloatType(IntType(1) << int_scale);
float_scale_up += FloatType(0.5) * float_scale_up;
float_scale_down = FloatType(1) / FloatType(IntType(1) << int_scale);
}
};
//
// Data members
//
/// Parameters object
Params params;
/// RNG state object
curandState_t rng_state;
//
// Methods
//
/// Device-side initialization of RNG
CUTLASS_DEVICE
RandomGaussianFunc(Params const ¶ms): params(params) {
uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(params.seed, gtid, 0, &rng_state);
}
/// Compute random value and update RNG state
CUTLASS_DEVICE
Element operator()() {
FloatType rnd = random_normal_float<FloatType>(&rng_state);
rnd = params.mean + params.stddev * rnd;
Element result;
if (params.int_scale >= 0) {
rnd = FloatType(IntType(rnd * params.float_scale_up));
result = Element(rnd * params.float_scale_down);
}
else {
result = Element(rnd);
}
return result;
}
};
template <typename Real>
struct RandomGaussianFunc<complex<Real>> {
using Element = complex<Real>;
using FloatType = typename std::conditional<(sizeof(Real) > 4), double, float>::type;
using IntType = typename std::conditional<(sizeof(Real) > 4), int64_t, int>::type;
/// Parameters structure
struct Params {
//
// Data members
//
uint64_t seed;
FloatType mean;
FloatType stddev;
int int_scale;
FloatType float_scale_up;
FloatType float_scale_down;
//
// Methods
//
/// Construction of Gaussian RNG functor.
Params(
uint64_t seed_ = 0,
Real mean_ = 0,
Real stddev_ = 1,
int int_scale_ = -1
):
seed(seed_),
mean(static_cast<FloatType>(mean_)),
stddev(static_cast<FloatType>(stddev_)),
int_scale(int_scale_) {
float_scale_up = FloatType(IntType(1) << int_scale);
float_scale_up += FloatType(0.5) * float_scale_up;
float_scale_down = FloatType(1) / FloatType(IntType(1) << int_scale);
}
};
//
// Data members
//
/// Parameters object
Params params;
/// RNG state object
curandState_t rng_state;
//
// Methods
//
/// Device-side initialization of RNG
CUTLASS_DEVICE
RandomGaussianFunc(Params const ¶ms): params(params) {
uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(params.seed, gtid, 0, &rng_state);
}
/// Compute random value and update RNG state
CUTLASS_DEVICE
Element operator()() {
FloatType rnd_r = random_normal_float<FloatType>(&rng_state);
FloatType rnd_i = random_normal_float<FloatType>(&rng_state);
rnd_r = params.mean + params.stddev * rnd_r;
rnd_i = params.mean + params.stddev * rnd_i;
Element result;
if (params.int_scale >= 0) {
rnd_r = FloatType(IntType(rnd_r * params.float_scale_up));
rnd_i = FloatType(IntType(rnd_i * params.float_scale_up));   // scale up before truncation, as in the real-valued path
result = {
Real(rnd_r * params.float_scale_down),
Real(rnd_i * params.float_scale_down)
};
}
else {
result = Element(Real(rnd_r), Real(rnd_i));
}
return result;
}
};
/// Fills a tensor with random values from a Gaussian distribution
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillRandomGaussianFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Scalar type
typedef typename TensorView::Element T;
/// Coordinate in tensor's index space
typedef typename TensorView::TensorCoord TensorCoord;
using RandomFunc = RandomGaussianFunc<Element>;
/// Parameters structure
struct Params {
//
// Data members
//
TensorView view;
typename RandomFunc::Params random;
//
// Methods
//
/// Construction of Gaussian RNG functor.
Params(
TensorView view_ = TensorView(),
typename RandomFunc::Params random_ = typename RandomFunc::Params()
):
view(view_), random(random_) {
}
};
//
// Data members
//
Params params;
RandomFunc random;
//
// Methods
//
/// Device-side initialization of RNG
CUTLASS_DEVICE
TensorFillRandomGaussianFunc(Params const ¶ms): params(params), random(params.random) {
}
/// Compute random value and update RNG state
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
params.view.at(coord) = random();
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values with a Gaussian distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomGaussian(
TensorView<Element, Layout> view, ///< destination tensor
uint64_t seed, ///< seed for RNG
Element mean = Element(0), ///< Gaussian distribution's mean
Element stddev = Element(1), ///< Gaussian distribution's standard deviation
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
using RandomFunc = detail::RandomGaussianFunc<Element>;
using Func = detail::TensorFillRandomGaussianFunc<Element, Layout>;
using Params = typename Func::Params;
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params(view, typename RandomFunc::Params(seed, mean, stddev, bits))
);
}
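// Usage sketch (illustrative only; `tensor` is assumed to be a
// cutlass::HostTensor<float, cutlass::layout::RowMajor> whose device allocation is the
// fill target):
//
//   cutlass::reference::device::TensorFillRandomGaussian(
//     tensor.device_view(), /*seed=*/2080ULL, /*mean=*/0.0f, /*stddev=*/1.0f);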
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values with a Gaussian distribution.
template <typename Element> ///< Element type
void BlockFillRandomGaussian(
Element *ptr,
size_t capacity,
uint64_t seed, ///< seed for RNG
typename RealType<Element>::Type mean, ///< Gaussian distribution's mean
typename RealType<Element>::Type stddev, ///< Gaussian distribution's standard deviation
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
using RandomFunc = detail::RandomGaussianFunc<Element>;
typename RandomFunc::Params params(seed, mean, stddev, bits);
BlockForEach<Element, RandomFunc>(ptr, capacity, params);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Computes a random uniform distribution
template <typename Element> ///< Element type
struct RandomUniformFunc {
using FloatType = typename std::conditional<
(sizeof(Element) > 4),
double,
float>::type;
using IntType = typename std::conditional<
(sizeof(Element) > 4),
int64_t,
int>::type;
/// Parameters structure
struct Params {
//
// Data members
//
uint64_t seed;
FloatType range;
FloatType max;
int int_scale;
FloatType float_scale_up;
FloatType float_scale_down;
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
//
// Methods
//
/// Construction of uniform RNG functor.
Params(
uint64_t seed_ = 0,
Element max_ = 1,
Element min = 0,
int int_scale_ = -1
):
seed(seed_),
range(static_cast<FloatType>(max_ - min)),
max(static_cast<FloatType>(max_)),
int_scale(int_scale_) {
float_scale_up = FloatType(IntType(1) << int_scale);
float_scale_up += FloatType(0.5) * float_scale_up;
float_scale_down = FloatType(1) / FloatType(IntType(1) << int_scale);
}
};
//
// Data members
//
/// Parameters object
Params params;
/// RNG state object
curandState_t rng_state;
//
// Methods
//
/// Device-side initialization of RNG
CUTLASS_DEVICE
RandomUniformFunc(Params const ¶ms): params(params) {
uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(params.seed, gtid, 0, &rng_state);
}
/// Compute random value and update RNG state
CUTLASS_DEVICE
Element operator()() {
FloatType rnd = random_uniform_float<FloatType>(&rng_state);
rnd = params.max - params.range * rnd;
// Random values are cast to integer after scaling by a power of two to facilitate error
// testing
Element result;
if (params.int_scale >= 0) {
rnd = FloatType(IntType(rnd * params.float_scale_up));
result = Element(rnd * params.float_scale_down);
}
else {
result = Element(rnd);
}
return result;
}
};
/// Computes a random uniform distribution
template <typename Real>
struct RandomUniformFunc<complex<Real>> {
using Element = complex<Real>;
using FloatType = typename std::conditional<
(sizeof(Real) > 4),
double,
float>::type;
using IntType = typename std::conditional<
(sizeof(Real) > 4),
int64_t,
int>::type;
/// Parameters structure
struct Params {
//
// Data members
//
uint64_t seed;
FloatType range;
FloatType min;
int int_scale;
FloatType float_scale_up;
FloatType float_scale_down;
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
//
// Methods
//
/// Construction of uniform RNG functor.
Params(
uint64_t seed_ = 0,
FloatType max = 1,
FloatType min_ = 0,
int int_scale_ = -1
):
seed(seed_),
range(static_cast<FloatType>(max - min_)),
min(static_cast<FloatType>(min_)),
int_scale(int_scale_) {
float_scale_up = FloatType(IntType(1) << int_scale);
float_scale_up += FloatType(0.5) * float_scale_up;
float_scale_down = FloatType(1) / FloatType(IntType(1) << int_scale);
}
};
//
// Data members
//
/// Parameters object
Params params;
/// RNG state object
curandState_t rng_state;
//
// Methods
//
/// Device-side initialization of RNG
CUTLASS_DEVICE
RandomUniformFunc(Params const ¶ms): params(params) {
uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(params.seed, gtid, 0, &rng_state);
}
/// Compute random value and update RNG state
CUTLASS_DEVICE
Element operator()() {
FloatType rnd_r = random_uniform_float<FloatType>(&rng_state);
FloatType rnd_i = random_uniform_float<FloatType>(&rng_state);
rnd_r = params.min + params.range * rnd_r;
rnd_i = params.min + params.range * rnd_i;
// Random values are cast to integer after scaling by a power of two to facilitate error
// testing
Element result;
if (params.int_scale >= 0) {
rnd_r = FloatType(IntType(rnd_r * params.float_scale_up));
rnd_i = FloatType(IntType(rnd_i * params.float_scale_up));
result = {
Real(rnd_r * params.float_scale_down),
Real(rnd_i * params.float_scale_down)
};
}
else {
result = Element(Real(rnd_r), Real(rnd_i));
}
return result;
}
};
/// Fills a tensor with random values from a uniform distribution
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillRandomUniformFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Scalar type
typedef typename TensorView::Element T;
/// Coordinate in tensor's index space
typedef typename TensorView::TensorCoord TensorCoord;
using RandomFunc = RandomUniformFunc<Element>;
/// Parameters structure
struct Params {
//
// Data members
//
TensorView view;
typename RandomFunc::Params random;
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
//
// Methods
//
/// Construction of uniform RNG functor.
Params(
TensorView view_ = TensorView(),
typename RandomFunc::Params random_ = RandomFunc::Params()
):
view(view_), random(random_) {
}
};
//
// Data members
//
Params params;
RandomFunc random;
//
// Methods
//
/// Device-side initialization of RNG
CUTLASS_DEVICE
TensorFillRandomUniformFunc(Params const ¶ms): params(params), random(params.random) {
}
/// Compute random value and update RNG state
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
params.view.at(coord) = random();
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values with a uniform random distribution.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomUniform(
TensorView<Element, Layout> view, ///< destination tensor
uint64_t seed, ///< seed for RNG
Element max = Element(1), ///< upper bound of distribution
Element min = Element(0), ///< lower bound for distribution
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
using RandomFunc = detail::RandomUniformFunc<Element>;
using Func = detail::TensorFillRandomUniformFunc<Element, Layout>;
using Params = typename Func::Params;
typename RandomFunc::Params random(seed, max, min, bits);
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params(view, random)
);
}
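// Usage sketch (illustrative only; `tensor` is assumed to be a cutlass::HostTensor whose
// device allocation is the fill target). Passing bits = 0 additionally quantizes the
// generated values to whole numbers:
//
//   cutlass::reference::device::TensorFillRandomUniform(
//     tensor.device_view(), /*seed=*/2080ULL, /*max=*/4.0f, /*min=*/-4.0f, /*bits=*/0);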
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random values with a uniform random distribution.
template <typename Element>
void BlockFillRandomUniform(
Element *ptr,
size_t capacity,
uint64_t seed, ///< seed for RNG
typename RealType<Element>::Type max, ///< upper bound of distribution
typename RealType<Element>::Type min, ///< lower bound for distribution
int bits = -1) { ///< If non-negative, specifies number of fractional bits that
/// are not truncated to zero. Permits reducing precision of
/// data.
using RandomFunc = detail::RandomUniformFunc<Element>;
typename RandomFunc::Params params(seed, max, min, bits);
BlockForEach<Element, RandomFunc>(ptr, capacity, params);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Generates random sparse metadata
template <typename Element> ///< Element type
struct RandomSparseMetaFunc {
using FloatType = float;
using IntType = int32_t;
/// Parameters structure
struct Params {
//
// Data members
//
uint64_t seed;
FloatType range;
int MetaSizeInBits;
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
//
// Methods
//
/// Construction of sparse-metadata RNG functor.
Params(
uint64_t seed_ = 0,
int MetaSizeInBits_ = 2
):
seed(seed_),
MetaSizeInBits(MetaSizeInBits_) {
if (MetaSizeInBits_ == 2) {
range = 6;
} else if (MetaSizeInBits_ == 4) {
range = 2;
}
}
};
//
// Data members
//
/// Parameters object
Params params;
/// RNG state object
curandState_t rng_state;
//
// Methods
//
/// Device-side initialization of RNG
CUTLASS_DEVICE
RandomSparseMetaFunc(Params const ¶ms): params(params) {
uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(params.seed, gtid, 0, &rng_state);
}
/// Compute random value and update RNG state
CUTLASS_DEVICE
Element operator()() {
Element FourToTwoMeta[6] = {0x4, 0x8, 0x9, 0xc, 0xd, 0xe};
Element TwoToOneMeta[2] = {0x4, 0xe};
Element *MetaArray =
(params.MetaSizeInBits == 2) ? FourToTwoMeta : TwoToOneMeta;
Element result = 0x0;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < cutlass::sizeof_bits<Element>::value / 4; ++i) {
FloatType rnd = random_uniform_float<FloatType>(&rng_state);
rnd = params.range * rnd;
Element meta = MetaArray[(int)rnd];
result = (Element)(result | ((Element)(meta << (i * 4))));
}
return result;
}
};
/// Fills a tensor with random sparse metadata
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillRandomSparseMetaFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Scalar type
typedef typename TensorView::Element T;
/// Coordinate in tensor's index space
typedef typename TensorView::TensorCoord TensorCoord;
using RandomFunc = RandomSparseMetaFunc<Element>;
/// Parameters structure
struct Params {
//
// Data members
//
TensorView view;
typename RandomFunc::Params random;
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
//
// Methods
//
/// Construction of sparse-metadata RNG functor.
Params(
TensorView view_ = TensorView(),
typename RandomFunc::Params random_ = RandomFunc::Params()
):
view(view_), random(random_) {
}
};
//
// Data members
//
Params params;
RandomFunc random;
//
// Methods
//
/// Device-side initialization of RNG
CUTLASS_DEVICE
TensorFillRandomSparseMetaFunc(Params const ¶ms): params(params), random(params.random) {
}
/// Compute random value and update RNG state
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
params.view.at(coord) = random();
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with random sparse metadata.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillRandomSparseMeta(
TensorView<Element, Layout> view, ///< destination tensor
uint64_t seed, ///< seed for RNG
int MetaSizeInBits = 2) {                      ///< size of each metadata element in bits (2 or 4)
using RandomFunc = detail::RandomSparseMetaFunc<Element>;
using Func = detail::TensorFillRandomUniformFunc<Element, Layout>;
using Params = typename Func::Params;
typename RandomFunc::Params random(seed, MetaSizeInBits);
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params(view, random)
);
}
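// Usage sketch (illustrative only; `tensor_e` is assumed to be a cutlass::HostTensor with
// an unsigned integer element type, e.g. uint16_t, holding the sparse-metadata operand):
//
//   cutlass::reference::device::TensorFillRandomSparseMeta(
//     tensor_e.device_view(), /*seed=*/2080ULL, /*MetaSizeInBits=*/2);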
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a block of data with random sparse metadata.
template <typename Element>
void BlockFillRandomSparseMeta(
Element *ptr,
size_t capacity,
uint64_t seed, ///< seed for RNG
int MetaSizeInBits = 2) { ///< meta data size
using RandomFunc = detail::RandomSparseMetaFunc<Element>;
typename RandomFunc::Params params(seed, MetaSizeInBits);
BlockForEach<Element, RandomFunc>(ptr, capacity, params);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Functor to fill a tensor with zeros off the diagonal and a uniform value on the diagonal.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillDiagonalFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Scalar type
typedef typename TensorView::Element T;
/// Coordinate in tensor's index space
typedef typename TensorView::TensorCoord TensorCoord;
/// Parameters structure
struct Params {
//
// Data members
//
TensorView view;
Element diag;
Element other;
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
//
// Methods
//
Params(
TensorView view_ = TensorView(),
Element diag_ = Element(1),
Element other_ = Element(0)
):
view(view_), diag(diag_), other(other_) {
}
};
//
// Data members
//
/// Parameters object
Params params;
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
TensorFillDiagonalFunc(Params const ¶ms): params(params) {
}
/// Updates the tensor
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
bool is_diag = true;
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
if (coord[i] != coord[i - 1]) {
is_diag = false;
break;
}
}
params.view.at(coord) = (is_diag ? params.diag : params.other);
}
};
// Overwrites the elements of a tensor with a uniform value depending on fill mode
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillPartialFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Scalar type
typedef typename TensorView::Element T;
/// Coordinate in tensor's index space
typedef typename TensorView::TensorCoord TensorCoord;
/// Parameters structure
struct Params {
//
// Data members
//
TensorView view;
Element element;
FillMode fill_mode;
/// Default ctor
CUTLASS_HOST_DEVICE
Params(): fill_mode(FillMode::kNone) { }
//
// Methods
//
/// Constructs the parameters object.
Params(
TensorView view_,
Element element_,
FillMode fill_mode_
):
view(view_), element(element_), fill_mode(fill_mode_) {
}
};
//
// Data members
//
/// Parameters object
Params params;
//
// Methods
//
CUTLASS_DEVICE
TensorFillPartialFunc(Params const ¶ms): params(params) {
}
/// Overwrites the element if it is within the covered region.
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
bool predicate = true;
switch (params.fill_mode) {
case FillMode::kFull:
predicate = true;
break;
case FillMode::kLower:
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
if (coord[i - 1] < coord[i]) {
predicate = false;
break;
}
}
break;
case FillMode::kUpper:
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
if (coord[i - 1] > coord[i]) {
predicate = false;
break;
}
}
break;
case FillMode::kDiagonal:
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
if (coord[i - 1] != coord[i]) {
predicate = false;
break;
}
}
break;
case FillMode::kNone: // fall-through
default:
predicate = false;
break;
}
if (predicate) {
params.view.at(coord) = params.element;
}
}
};
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorClearPartialFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Scalar type
typedef typename TensorView::Element T;
/// Coordinate in tensor's index space
typedef typename TensorView::TensorCoord TensorCoord;
///
static_assert((Layout::kRank == 2), "TensorClearPartial is only supported for matrices");
/// Parameters structure
struct Params {
//
// Data members
//
TensorView view;
Element element;
FillMode fill_mode;
int alignment;
/// Default ctor
CUTLASS_HOST_DEVICE
Params(): fill_mode(FillMode::kNone) { }
//
// Methods
//
/// Constructs the parameters object.
Params(
TensorView view_,
Element element_,
FillMode fill_mode_,
int alignment_
):
view(view_), element(element_), fill_mode(fill_mode_), alignment(alignment_) {
}
};
//
// Data members
//
/// Parameters object
Params params;
//
// Methods
//
CUTLASS_DEVICE
TensorClearPartialFunc(Params const ¶ms): params(params) {
}
/// Overwrites the element if it is within the covered region.
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
bool predicate = true;
switch (params.fill_mode) {
case FillMode::kLower:
if ((coord[0] >= coord[1]) ||
((coord[1] - coord[0]) >= params.alignment)) {
predicate = false;
break;
}
break;
case FillMode::kUpper:
if ((coord[0] <= coord[1]) ||
((coord[0] - coord[1]) >= params.alignment)) {
predicate = false;
break;
}
break;
case FillMode::kNone: // fall-through
default:
predicate = false;
break;
}
if (predicate) {
params.view.at(coord) = params.element;
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with one value on its diagonal and another value everywhere else.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillDiagonal(
TensorView<Element, Layout> view, ///< destination tensor
Element diag = Element(1), ///< value to write in the diagonal
Element other = Element(0)) { ///< value to write off the diagonal
typedef detail::TensorFillDiagonalFunc<Element, Layout> Func;
typedef typename Func::Params Params;
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params(view, diag, other)
);
}
/// Fills a tensor partially depending on fill mode. Elements not covered by the fill mode are
/// not written.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillPartial(
TensorView<Element, Layout> view, ///< destination tensor
Element element,
FillMode fill_mode) {
typedef detail::TensorFillPartialFunc<Element, Layout> Func;
typedef typename Func::Params Params;
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params(view, element, fill_mode)
);
}
/// Clears a tensor partially depending on fill mode and alignment. Elements on the wrong side
/// of the fill mode (up to the alignment) are overwritten with the user-supplied element (typically zero).
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorClearPartial(
TensorView<Element, Layout> view, ///< destination tensor
Element element,
FillMode fill_mode,
int alignment) {
typedef detail::TensorClearPartialFunc<Element, Layout> Func;
typedef typename Func::Params Params;
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params(view, element, fill_mode, alignment)
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with a uniform value
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFill(
TensorView<Element, Layout> view, ///< destination tensor
Element val = Element(0)) { ///< value to uniformly fill it with
TensorFillDiagonal(view, val, val);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor's diagonal with 1 and 0 everywhere else.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillIdentity(
TensorView<Element, Layout> view) { ///< destination tensor
TensorFillDiagonal(view, Element(1), Element(0));
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Writes a uniform value to the diagonal of a tensor
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorUpdateDiagonalFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Scalar type
typedef typename TensorView::Element T;
/// Coordinate in tensor's index space
typedef typename TensorView::TensorCoord TensorCoord;
/// Parameters structure
struct Params {
//
// Data members
//
TensorView view;
Element diag;
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
//
// Methods
//
/// Constructs the parameters object.
Params(
TensorView view_ = TensorView(),
Element diag_ = Element(1)
):
view(view_), diag(diag_) {
}
};
//
// Data members
//
/// Parameters object
Params params;
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
TensorUpdateDiagonalFunc(Params const ¶ms): params(params) {
}
/// Updates the diagonal element
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
bool is_diag = true;
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
if (coord[i] != coord[i - 1]) {
is_diag = false;
break;
}
}
if (is_diag) {
params.view.at(coord) = params.diag;
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Writes a uniform value to the diagonal of a tensor without modifying off-diagonal elements.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorUpdateDiagonal(
TensorView<Element, Layout> view, ///< destination tensor
Element diag = Element(1)) {
typedef detail::TensorUpdateDiagonalFunc<Element, Layout> Func;
typedef typename Func::Params Params;
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params(view, diag)
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Writes a uniform value to all off-diagonal elements of a tensor
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorUpdateOffDiagonalFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Scalar type
typedef typename TensorView::Element T;
/// Coordinate in tensor's index space
typedef typename TensorView::TensorCoord TensorCoord;
/// Parameters structure
struct Params {
//
// Data members
//
TensorView view;
Element other;
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
//
// Methods
//
/// Constructs the parameters object.
Params(
TensorView view_ = TensorView(),
Element other_ = Element(0)
):
view(view_), other(other_) {
}
};
//
// Data members
//
/// Parameters object
Params params;
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
TensorUpdateOffDiagonalFunc(Params const ¶ms): params(params) {
}
/// Updates off-diagonal elements
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
bool is_diag = true;
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
if (coord[i] != coord[i - 1]) {
is_diag = false;
break;
}
}
if (!is_diag) {
params.view.at(coord) = params.other;
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Writes a uniform value to all off-diagonal elements without modifying the diagonal.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorUpdateOffDiagonal(
TensorView<Element, Layout> view, ///< destination tensor
Element other = Element(1)) {
typedef detail::TensorUpdateOffDiagonalFunc<Element, Layout> Func;
typedef typename Func::Params Params;
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params(view, other)
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Fills a tensor with a linear combination of its coordinates and a vector of coefficients
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorFillLinearFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Scalar type
typedef typename TensorView::Element T;
/// Coordinate in tensor's index space
typedef typename TensorView::TensorCoord TensorCoord;
/// Parameters structure
struct Params {
//
// Data members
//
TensorView view;
Array<Element, Layout::kRank> v;
Element s;
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
//
// Methods
//
/// Constructs the parameters object.
Params(
TensorView view_, ///< destination tensor
Array<Element, Layout::kRank> const & v_,
Element s_ = Element(0)
):
view(view_), v(v_), s(s_) {
}
};
//
// Data members
//
/// Parameters object
Params params;
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
TensorFillLinearFunc(Params const ¶ms): params(params) {
}
/// Writes the linear combination for this coordinate
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
Element sum = params.s;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Layout::kRank; ++i) {
sum += params.v[i] * Element(coord[i]);
}
params.view.at(coord) = sum;
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a tensor with a linear combination of its coordinates and a vector of coefficients
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorFillLinear(
TensorView<Element, Layout> view, ///< destination tensor
Array<Element, Layout::kRank> const & v,
Element s = Element(0)) {
using Func = detail::TensorFillLinearFunc<Element, Layout>;
using Params = typename Func::Params;
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params(view, v, s)
);
}
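// Worked example (illustrative; `tensor` is assumed to be an integer-element HostTensor):
// for a rank-2 view, v = {10, 1} and s = 0 write the value 10 * i + 1 * j at coordinate
// (i, j), so every element encodes its own coordinates, which is convenient for debugging
// layouts:
//
//   cutlass::Array<int, 2> v;
//   v[0] = 10;
//   v[1] = 1;
//   cutlass::reference::device::TensorFillLinear(tensor.device_view(), v, 0);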
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a block of data with sequential elements
template <
typename Element
>
void BlockFillSequential(
Element *ptr,
int64_t capacity,
Element v = Element(1),
Element s = Element(0)) {
// Note: no device-side implementation is provided in this header; this overload is a no-op.
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Fills a block of data with random elements drawn from the given distribution
template <
typename Element
>
void BlockFillRandom(
Element *ptr,
size_t capacity,
uint64_t seed,
Distribution dist) {
using Real = typename RealType<Element>::Type;
if (dist.kind == Distribution::Gaussian) {
BlockFillRandomGaussian<Element>(
ptr,
capacity,
seed,
static_cast<Real>(dist.gaussian.mean),
static_cast<Real>(dist.gaussian.stddev),
dist.int_scale);
}
else if (dist.kind == Distribution::Uniform) {
BlockFillRandomUniform<Element>(
ptr,
capacity,
seed,
static_cast<Real>(dist.uniform.max),
static_cast<Real>(dist.uniform.min),
dist.int_scale);
}
}
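// Usage sketch (illustrative only; `d_ptr` is assumed to point to `n` device-resident
// elements):
//
//   cutlass::Distribution dist;
//   dist.set_uniform(-4, 4);
//   cutlass::reference::device::BlockFillRandom<float>(d_ptr, n, /*seed=*/2080ULL, dist);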
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Copies a dense buffer onto the diagonal of a tensor
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorCopyDiagonalInFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Scalar type
typedef typename TensorView::Element T;
/// Coordinate in tensor's index space
typedef typename TensorView::TensorCoord TensorCoord;
/// Parameters structure
struct Params {
//
// Data members
//
TensorView view;
Element const *ptr;
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
//
// Methods
//
/// Constructs the parameters object.
Params(
TensorView view_, ///< destination tensor
Element const *ptr_
):
view(view_), ptr(ptr_) {
}
};
//
// Data members
//
/// Parameters object
Params params;
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
TensorCopyDiagonalInFunc(Params const ¶ms): params(params) {
}
/// Only update the diagonal element
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
bool is_diagonal = true;
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
if (coord[i] != coord[0]) {
is_diagonal = false;
}
}
if (is_diagonal) {
params.view.at(coord) = params.ptr[coord[0]];
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Copies a diagonal in from a device-accessible dense buffer without modifying off-diagonal elements.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorCopyDiagonalIn(
TensorView<Element, Layout> view, ///< destination tensor
Element const *ptr) { ///< dense buffer of elements
using Func = detail::TensorCopyDiagonalInFunc<Element, Layout>;
using Params = typename Func::Params;
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params(view, ptr)
);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
/// Copies the diagonal of a tensor out to a dense buffer
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorCopyDiagonalOutFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Scalar type
typedef typename TensorView::Element T;
/// Coordinate in tensor's index space
typedef typename TensorView::TensorCoord TensorCoord;
/// Parameters structure
struct Params {
//
// Data members
//
TensorView view;
Element *ptr;
/// Default ctor
CUTLASS_HOST_DEVICE
Params() { }
//
// Methods
//
/// Constructs the parameters object.
Params(
TensorView view_, ///< destination tensor
Element *ptr_
):
view(view_), ptr(ptr_) {
}
};
//
// Data members
//
/// Parameters object
Params params;
//
// Methods
//
/// Constructor
CUTLASS_DEVICE
TensorCopyDiagonalOutFunc(Params const ¶ms): params(params) {
}
  /// Only read out the diagonal element
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
bool is_diagonal = true;
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < Layout::kRank; ++i) {
if (coord[i] != coord[0]) {
is_diagonal = false;
}
}
if (is_diagonal) {
params.ptr[coord[0]] = params.view.at(coord);
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Copies the diagonal of a tensor into a dense buffer in device memory.
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorCopyDiagonalOut(
Element *ptr, ///< dense buffer of elements
TensorView<Element, Layout> view) { ///< source tensor
using Func = detail::TensorCopyDiagonalOutFunc<Element, Layout>;
using Params = typename Func::Params;
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params(view, ptr)
);
}
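// Illustrative usage sketch (added for exposition; not part of the upstream header). It assumes
// `d_diag` points to device memory large enough to hold the diagonal of `view`; the helper name
// is hypothetical.
template <typename Element, typename Layout>
void example_copy_diagonal_out(Element *d_diag, TensorView<Element, Layout> view) {
  // Gather the diagonal entries of the tensor into the dense buffer d_diag.
  TensorCopyDiagonalOut(d_diag, view);
}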
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reference
} // namespace cutlass
| 46,444 | C | 23.457609 | 114 | 0.566123 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/device/tensor_relu.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Defines device-side elementwise operations on TensorView. Note, the operations defined
in this header are not specialized for any particular data layout and are therefore not
intended to offer the best possible performance. Rather, they are intended to be generic
reference implementations to support the CUTLASS unit tests.
*/
#pragma once
// Cutlass includes
#include "cutlass/cutlass.h"
#include "cutlass/tensor_view.h"
#include "cutlass/util/reference/device/tensor_foreach.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reference {
namespace device {
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
struct TensorReLuFunc {
/// View type
using TensorView = TensorView<Element, Layout>;
/// Coordinate in tensor's index space
using TensorCoord = typename TensorView::TensorCoord;
/// Parameters structure
struct Params {
//
// Data members
//
TensorView view;
Element threshold;
//
// Methods
//
Params(
TensorView view_ = TensorView(),
Element threshold_ = Element(0)
):
view(view_), threshold(threshold_) {
}
};
//
// Data members
//
Params params;
//
// Methods
//
CUTLASS_DEVICE
  TensorReLuFunc(Params const &params): params(params) {
}
CUTLASS_DEVICE
void operator()(TensorCoord const &coord) {
Element const & value = params.view.at(coord);
params.view.at(coord) = (value < params.threshold) ? params.threshold : value;
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Apply ReLu on a tensor
template <
typename Element, ///< Element type
typename Layout> ///< Layout function
void TensorReLu(
TensorView<Element, Layout> view, ///< destination tensor
Element threshold = Element(0)) { ///< ReLu threshold
using Func = detail::TensorReLuFunc<Element, Layout>;
using Params = typename Func::Params;
TensorForEach<Func, Layout::kRank, Params>(
view.extent(),
Params(view, threshold)
);
}
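// Illustrative usage sketch (added for exposition; not part of the upstream header). The helper
// name is hypothetical; `view` is assumed to wrap device memory.
template <typename Element, typename Layout>
void example_apply_relu(TensorView<Element, Layout> view, Element threshold = Element(0)) {
  // Clamp every element of the tensor to be at least `threshold`, in place, on the device.
  TensorReLu(view, threshold);
}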
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reference
} // namespace cutlass
| 4,589 | C | 31.323943 | 100 | 0.584877 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/device/gemm.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for GEMM in device-side code.
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/numeric_types.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/util/reference/device/kernel/gemm.h"
namespace cutlass {
namespace reference {
namespace device {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// Explicitly naming types needed by this template can be cumbersome, particularly for the
/// accumulator type, so a function argument 'initial_accum' is exposed. Passing
/// AccumulatorType(0) as the last function argument can be easier than naming all template
/// arguments explicitly.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename AccumulatorType,
typename InnerProductOp = multiply_add<AccumulatorType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_gemm(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
AccumulatorType initial_accum) {
static_assert(
LayoutA::kRank == 2 &&
LayoutB::kRank == 2 &&
LayoutC::kRank == 2, "Tensors must be of rank 2");
// Blocking structure potentially improves performance of reference implementation
// with a minor increase in complexity.
//
// Note, this reference implementation is NOT expected to approach peak performance.
using OutputTile = MatrixShape<4, 4>;
dim3 block(16, 8);
dim3 grid(
(problem_size.m() + block.x * OutputTile::kRow - 1) / (block.x * OutputTile::kRow),
(problem_size.n() + block.y * OutputTile::kColumn - 1) / (block.y * OutputTile::kColumn)
);
// Launch a GEMM kernel
kernel::Gemm<
TensorRef<ElementA, LayoutA>,
TensorRef<ElementB, LayoutB>,
TensorRef<ElementC, LayoutC>,
ScalarType,
AccumulatorType,
OutputTile,
InnerProductOp,
ConvertOp
><<< grid, block >>>(
problem_size,
alpha,
tensor_a,
tensor_b,
beta,
tensor_c,
tensor_d,
initial_accum
);
}
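// Illustrative usage sketch (added for exposition; not part of the upstream header). It shows how
// the template above might be invoked for a single-precision GEMM D = alpha * A * B + beta * C;
// the helper name is hypothetical and all TensorRef arguments are assumed to reference device
// memory.
template <typename Layout>
void example_reference_sgemm(
  gemm::GemmCoord problem_size,
  float alpha,
  TensorRef<float, Layout> tensor_a,
  TensorRef<float, Layout> tensor_b,
  float beta,
  TensorRef<float, Layout> tensor_c,
  TensorRef<float, Layout> tensor_d) {
  // Accumulate in float and start the accumulation at zero.
  compute_gemm<float, Layout, float, Layout, float, Layout, float, float>(
    problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, float(0));
}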
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
///
/// This assumes the accumulator type is the same type as the scalars.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename AccumulatorType,
typename InnerProductOp = multiply_add<AccumulatorType>,
typename ConvertOp = NumericConverter<ElementC, ScalarType>
>
void compute_gemm(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b,
ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
AccumulatorType initial_accum) {
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, InnerProductOp, ConvertOp>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_c,
initial_accum);
}
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ScalarType,
typename AccumulatorType,
typename InnerProductOp = cutlass::arch::OpMultiplyAdd
>
struct Gemm;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename AccumulatorType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, arch::OpMultiplyAdd> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
AccumulatorType initial_accum = AccumulatorType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, multiply_add<AccumulatorType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
AccumulatorType initial_accum = AccumulatorType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, multiply_add<AccumulatorType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
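// Illustrative usage sketch (added for exposition; not part of the upstream header). The functor
// form is convenient when the math operation is selected via a template parameter; the default
// arch::OpMultiplyAdd argument picks the specialization above. The helper name is hypothetical.
template <typename Element, typename Layout>
void example_reference_gemm_functor(
  gemm::GemmCoord problem_size,
  Element alpha,
  TensorRef<Element, Layout> tensor_a,
  TensorRef<Element, Layout> tensor_b,
  Element beta,
  TensorRef<Element, Layout> tensor_c) {
  // Instantiate the multiply-add reference GEMM and overwrite tensor_c with the result.
  Gemm<Element, Layout, Element, Layout, Element, Layout, Element, Element> gemm_op;
  gemm_op(problem_size, alpha, tensor_a, tensor_b, beta, tensor_c);
}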
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for multiply-add-saturate
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename AccumulatorType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
AccumulatorType, arch::OpMultiplyAddSaturate> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
AccumulatorType initial_accum = AccumulatorType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, multiply_add<AccumulatorType>,
NumericConverterClamp<ElementC, ScalarType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
AccumulatorType initial_accum = AccumulatorType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, multiply_add<AccumulatorType>,
NumericConverterClamp<ElementC, ScalarType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for XOR-popc
template <typename ElementA, typename LayoutA, typename ElementB,
typename LayoutB, typename ElementC, typename LayoutC,
typename ScalarType, typename AccumulatorType>
struct Gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ScalarType,
AccumulatorType, arch::OpXorPopc> {
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
AccumulatorType initial_accum = AccumulatorType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, xor_add<AccumulatorType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, initial_accum);
}
void operator()(gemm::GemmCoord problem_size, ScalarType alpha,
TensorRef<ElementA, LayoutA> tensor_a,
TensorRef<ElementB, LayoutB> tensor_b, ScalarType beta,
TensorRef<ElementC, LayoutC> tensor_c,
TensorRef<ElementC, LayoutC> tensor_d,
AccumulatorType initial_accum = AccumulatorType(0)) {
static_assert(
LayoutA::kRank == 2 && LayoutB::kRank == 2 && LayoutC::kRank == 2,
"Tensors must be of rank 2");
compute_gemm<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
ScalarType, AccumulatorType, xor_add<AccumulatorType>>(
problem_size, alpha, tensor_a, tensor_b, beta, tensor_c, tensor_d, initial_accum);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Batched GEMM
//
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a batch of GEMMs over a set of matrices of common dimension.
//
// TensorRefCollection* is a type satisfying the TensorRefCollection concept.
//
template <
typename TensorRefCollectionA,
typename TensorRefCollectionB,
typename TensorRefCollectionC,
typename ScalarType,
typename AccumulatorType,
typename InnerProductOp,
typename ConvertOp
>
void BatchedGemm(
gemm::GemmCoord problem_size,
int batch_count,
ScalarType alpha,
TensorRefCollectionA const& tensor_a,
TensorRefCollectionB const& tensor_b,
ScalarType beta,
TensorRefCollectionC &tensor_c,
AccumulatorType initial_accum) {
static_assert(
TensorRefCollectionA::kRank == 2 &&
TensorRefCollectionB::kRank == 2 &&
TensorRefCollectionC::kRank == 2, "Tensors must be of rank 2");
// Blocking structure potentially improves performance of reference implementation
// with a minor increase in complexity.
//
// Note, this reference implementation is NOT expected to approach peak performance.
using OutputTile = MatrixShape<4, 4>;
dim3 block(16, 8);
dim3 grid(
(problem_size.m() + block.x * OutputTile::kRow - 1) / (block.x * OutputTile::kRow),
(problem_size.n() + block.y * OutputTile::kColumn - 1) / (block.y * OutputTile::kColumn),
batch_count
);
// Launch a GEMM kernel
kernel::BatchedGemm<
TensorRefCollectionA,
TensorRefCollectionB,
TensorRefCollectionC,
ScalarType,
AccumulatorType,
OutputTile,
InnerProductOp,
ConvertOp
><<< grid, block >>>(
problem_size,
alpha,
tensor_a,
tensor_b,
beta,
tensor_c,
initial_accum
);
}
/// Computes a batch of GEMMs over a set of matrices of common dimension. This overload assumes
/// the accumulator type is the same type as the scalars.
//
// TensorRefCollection* is a type satisfying the TensorRefCollection concept.
//
template <
typename TensorRefCollectionA,
typename TensorRefCollectionB,
typename TensorRefCollectionC,
typename ScalarType,
typename AccumulatorType
>
void BatchedGemm(
gemm::GemmCoord problem_size,
int batch_count,
ScalarType alpha,
TensorRefCollectionA const& tensor_a,
TensorRefCollectionB const& tensor_b,
ScalarType beta,
TensorRefCollectionC &tensor_c) {
  BatchedGemm(problem_size, batch_count, alpha, tensor_a, tensor_b, beta, tensor_c, ScalarType(0));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reference
} // namespace cutlass
| 14,296 | C | 36.03886 | 100 | 0.646125 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/device/tensor_reduce.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cmath>
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/tensor_view.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/reference/detail/linear_to_coordinate.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace reference {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace kernel {
template <
typename Element,
typename Layout,
typename ComputeType,
typename ReduceOp,
typename TransformOp,
int kBlockSize = 128
>
__global__ void TensorTransformReducePartial(
TensorView<Element, Layout> view, /// View of the tensor to reduce over
ComputeType identity, /// Identity element of the reduction operation
ReduceOp reduce, /// Reduces an accumulated value with a transformed element: f(ComputeType, ComputeType) => ComputeType
TransformOp transform, /// Transforms the tensor element to ComputeType: g(Element) => ComputeType
ComputeType *workspace) { /// Device-side workspace for accumulating partial results. The reduced element is stored in workspace[0]
int64_t idx = threadIdx.x + blockIdx.x * blockDim.x;
int64_t size = view.size();
__shared__ ComputeType scratchpad[kBlockSize];
for (; idx < size; idx += blockDim.x * gridDim.x) {
// Map linear thread ID onto tensor coordinate
typename Layout::TensorCoord coord;
cutlass::reference::detail::LinearToCoordinate<Layout::kRank>()(coord, idx, view.extent());
if (view.contains(coord)) {
// Fetch element
Element x = view.at(coord);
// Transform
identity = reduce(identity, transform(x));
}
}
scratchpad[threadIdx.x] = identity;
__syncthreads();
// One thread performs the final reduction and stores out. This could be enhanced via
// a tree reduction and pipelining.
if (threadIdx.x == 0) {
for (int i = 1; i < kBlockSize; ++i) {
identity = reduce(identity, scratchpad[i]);
}
workspace[blockIdx.x] = identity;
}
}
template <
typename Element,
typename Layout,
typename ComputeType,
typename ReduceOp,
typename TransformOp,
int kBlockSize = 128
>
__global__ void TensorTransformReducePartial(
TensorView<Element, Layout> view_A, /// View of the tensor to reduce over
TensorView<Element, Layout> view_B, /// View of the tensor to reduce over
ComputeType identity, /// Identity element of the reduction operation
ReduceOp reduce, /// Reduces an accumulated value with a transformed element: f(ComputeType, ComputeType) => ComputeType
TransformOp transform, /// Transforms the tensor element to ComputeType: g(Element) => ComputeType
ComputeType *workspace) { /// Device-side workspace for accumulating partial results. The reduced element is stored in workspace[0]
int64_t idx = threadIdx.x + blockIdx.x * blockDim.x;
int64_t size = view_A.size();
__shared__ ComputeType scratchpad[kBlockSize];
for (; idx < size; idx += blockDim.x * gridDim.x) {
// Map linear thread ID onto tensor coordinate
typename Layout::TensorCoord coord;
cutlass::reference::detail::LinearToCoordinate<Layout::kRank>()(coord, idx, view_A.extent());
if (view_A.contains(coord)) {
// Fetch element
Element a = view_A.at(coord);
Element b = view_B.at(coord);
// Transform
identity = reduce(identity, transform(a, b));
}
}
scratchpad[threadIdx.x] = identity;
__syncthreads();
// One thread performs the final reduction and stores out. This could be enhanced via
// a tree reduction and pipelining.
if (threadIdx.x == 0) {
for (int i = 1; i < kBlockSize; ++i) {
identity = reduce(identity, scratchpad[i]);
}
workspace[blockIdx.x] = identity;
}
}
template <
typename ComputeType,
typename ReduceOp,
int kBlockSize = 32
>
__global__ void TensorTransformReduceFinalize(
ComputeType *workspace,
ComputeType identity,
int workspace_size,
ReduceOp reduce) {
__shared__ ComputeType scratchpad[kBlockSize];
for (int idx = threadIdx.x; idx < workspace_size; idx += kBlockSize) {
identity = reduce(identity, workspace[idx]);
}
scratchpad[threadIdx.x] = identity;
__syncthreads();
if (threadIdx.x == 0) {
for (int i = 1; i < kBlockSize; ++i) {
identity = reduce(identity, scratchpad[i]);
}
workspace[0] = identity;
}
}
} // namespace kernel
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Transform-reduce operation over the elements of a tensor
template <
typename Element,
typename Layout,
typename ComputeType,
typename ReduceOp,
typename TransformOp
>
ComputeType TensorTransformReduce(
TensorView<Element, Layout> view, /// View of the tensor to reduce over
ComputeType identity, /// Identity element of the reduction operation
ReduceOp reduce, /// Reduces an accumulated value with a transformed element: f(ComputeType, ComputeType) => ComputeType
TransformOp transform, /// Transforms the tensor element to ComputeType: g(Element) => ComputeType
ComputeType *workspace, /// Device-side workspace for accumulating partial results. The reduced element is stored in workspace[0]
int workspace_size, /// Number of elements in workspace
cudaStream_t stream = nullptr, /// CUDA stream to launch into
bool copy_out = true /// If true, the value of workspace[0] is copied to host and returned. Otherwise, `identity` is returned.
) {
int const kBlockSize = 128;
dim3 block(kBlockSize, 1);
dim3 grid(workspace_size, 1);
kernel::TensorTransformReducePartial<
Element, Layout, ComputeType, ReduceOp, TransformOp, kBlockSize
><<< grid, block, 0, stream >>>(
view, identity, reduce, transform, workspace
);
int const kFinalizeBlockSize = 32;
kernel::TensorTransformReduceFinalize<
ComputeType, ReduceOp, kFinalizeBlockSize
><<< dim3(1, 1), dim3(kFinalizeBlockSize, 1), 0, stream >>>(
workspace, identity, workspace_size, reduce
);
if (copy_out) {
cudaError_t result = cudaMemcpy(&identity, workspace, sizeof(identity), cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
throw std::runtime_error("cudaMemcpy() failed");
}
}
return identity;
}
/// Transform-reduce operation over the elements of two tensors, zipped together
template <
typename Element,
typename Layout,
typename ComputeType,
typename ReduceOp,
typename TransformOp
>
ComputeType TensorTransformReduce(
TensorView<Element, Layout> view_A, /// View of the tensor to reduce over
TensorView<Element, Layout> view_B, /// View of the tensor to reduce over
ComputeType identity, /// Identity element of the reduction operation
ReduceOp reduce, /// Reduces an accumulated value with a transformed element: f(ComputeType, ComputeType) => ComputeType
TransformOp transform, /// Transforms the tensor element to ComputeType: g(Element) => ComputeType
ComputeType *workspace, /// Device-side workspace for accumulating partial results. The reduced element is stored in workspace[0]
int workspace_size, /// Number of elements in workspace
cudaStream_t stream = nullptr, /// CUDA stream to launch into
bool copy_out = true /// If true, the value of workspace[0] is copied to host and returned. Otherwise, `identity` is returned.
) {
if (view_A.extent() != view_B.extent()) {
throw std::runtime_error("Extents must be equal.");
}
int const kBlockSize = 128;
dim3 block(kBlockSize, 1);
dim3 grid(workspace_size, 1);
kernel::TensorTransformReducePartial<
Element, Layout, ComputeType, ReduceOp, TransformOp, kBlockSize
><<< grid, block, 0, stream >>>(
view_A, view_B, identity, reduce, transform, workspace
);
int const kFinalizeBlockSize = 32;
kernel::TensorTransformReduceFinalize<
ComputeType, ReduceOp, kFinalizeBlockSize
><<< dim3(1, 1), dim3(kFinalizeBlockSize, 1), 0, stream >>>(
workspace, identity, workspace_size, reduce
);
if (copy_out) {
cudaError_t result = cudaMemcpy(&identity, workspace, sizeof(identity), cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
throw std::runtime_error("cudaMemcpy() failed");
}
}
return identity;
}
/// Transform-reduce operation over the elements of a tensor. This helper allocates the device-side
/// workspace
template <
typename Element,
typename Layout,
typename ComputeType,
typename ReduceOp,
typename TransformOp
>
ComputeType TensorTransformReduce(
TensorView<Element, Layout> view,
ComputeType identity,
ReduceOp reduce,
TransformOp transform,
cudaStream_t stream = nullptr,
int workspace_size = 0
) {
// Optionally query for the SM count to size the workspace.
if (!workspace_size) {
int device_idx = 0;
cudaDeviceProp prop;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() failed");
}
result = cudaGetDeviceProperties(&prop, device_idx);
if (result != cudaSuccess) {
      throw std::runtime_error("cudaGetDeviceProperties() failed");
}
workspace_size = int(prop.multiProcessorCount);
}
DeviceAllocation<ComputeType> workspace(workspace_size);
ComputeType output = TensorTransformReduce(
view,
identity,
reduce,
transform,
workspace.get(),
workspace_size,
stream,
true);
return output;
}
/// Transform-reduce operation over the elements of a tensor. This helper allocates the device-side
/// workspace
template <
typename Element,
typename Layout,
typename ComputeType,
typename ReduceOp,
typename TransformOp
>
ComputeType TensorTransformReduce(
TensorView<Element, Layout> view_A,
TensorView<Element, Layout> view_B,
ComputeType identity,
ReduceOp reduce,
TransformOp transform,
cudaStream_t stream = nullptr,
int workspace_size = 0
) {
// Optionally query for the SM count to size the workspace.
if (!workspace_size) {
int device_idx = 0;
cudaDeviceProp prop;
cudaError_t result = cudaGetDevice(&device_idx);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() failed");
}
result = cudaGetDeviceProperties(&prop, device_idx);
if (result != cudaSuccess) {
      throw std::runtime_error("cudaGetDeviceProperties() failed");
}
workspace_size = int(prop.multiProcessorCount);
}
DeviceAllocation<ComputeType> workspace(workspace_size);
ComputeType output = TensorTransformReduce(
view_A,
view_B,
identity,
reduce,
transform,
workspace.get(),
workspace_size,
stream,
true);
return output;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper to compute the sum of the elements of a tensor
template <
typename Element,
typename Layout,
typename ComputeType = Element
>
ComputeType TensorSum(
TensorView<Element, Layout> view,
ComputeType identity = ComputeType(),
cudaStream_t stream = nullptr,
int workspace_size = 0
) {
plus<ComputeType> reduce;
NumericConverter<ComputeType, Element> transform;
return TensorTransformReduce(
view, identity, reduce, transform, stream, workspace_size);
}
/// Helper to compute the sum of the squares of the elements of a tensor
template <
typename Element,
typename Layout,
typename ComputeType = Element
>
ComputeType TensorSumSq(
TensorView<Element, Layout> view,
ComputeType identity = ComputeType(),
cudaStream_t stream = nullptr,
int workspace_size = 0
) {
plus<ComputeType> reduce;
magnitude_squared<Element, ComputeType> transform;
return TensorTransformReduce(
view, identity, reduce, transform, stream, workspace_size);
}
/// Helper to compute the norm of the elements of a tensor.
template <
typename Element,
typename Layout,
typename ComputeType = double
>
ComputeType TensorNorm(
TensorView<Element, Layout> view,
ComputeType identity = ComputeType(),
cudaStream_t stream = nullptr,
int workspace_size = 0
) {
return std::sqrt(TensorSumSq(view, identity, stream, workspace_size));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper to compute the sum of the squares of the differences of two tensors
template <
typename Element,
typename Layout,
typename ComputeType = double
>
ComputeType TensorSumSqDiff(
TensorView<Element, Layout> view_A,
TensorView<Element, Layout> view_B,
ComputeType identity = ComputeType(),
cudaStream_t stream = nullptr,
int workspace_size = 0
) {
plus<ComputeType> reduce;
magnitude_squared_difference<Element, ComputeType> transform;
return TensorTransformReduce(
view_A, view_B, identity, reduce, transform, stream, workspace_size);
}
/// Helper to compute the norm of the tensor computed as the difference of two tensors in memory
template <
typename Element,
typename Layout,
typename ComputeType = double
>
ComputeType TensorNormDiff(
TensorView<Element, Layout> view_A,
TensorView<Element, Layout> view_B,
ComputeType identity = ComputeType(),
cudaStream_t stream = nullptr,
int workspace_size = 0
) {
return std::sqrt(TensorSumSqDiff(view_A, view_B, identity, stream, workspace_size));
}
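// Illustrative usage sketch (added for exposition; not part of the upstream header). A common way
// to compare a computed tensor against a reference is the relative error
// ||computed - reference|| / ||reference||; the helper below is hypothetical and assumes both
// views wrap device memory with identical extents.
template <typename Element, typename Layout>
double example_relative_error(
  TensorView<Element, Layout> view_computed,
  TensorView<Element, Layout> view_reference) {
  double norm_ref = TensorNorm(view_reference);
  double norm_diff = TensorNormDiff(view_computed, view_reference);
  // Guard against division by zero when the reference tensor is identically zero.
  return (norm_ref > 0) ? (norm_diff / norm_ref) : norm_diff;
}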
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reference
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 15,964 | C | 30.242661 | 145 | 0.658043 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/device/convolution.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Reference implementation for convolution in device-side code.
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/functional.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
namespace cutlass {
namespace reference {
namespace device {
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace kernel {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Conv2d device reference kernel
////////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2d Fprop kernel - y = fprop(x, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>,
int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension
int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension
int kCtaShapeM = 16, // shape of a threadblock in units of threads
int kCtaShapeN = 8 // shape of a threadblock in units of threads
>
__global__ void Conv2dFprop(
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_x,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_y_in,
TensorRef<ElementC, LayoutC> tensor_y_out,
ElementCompute alpha,
ElementCompute beta
) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
ElementAccumulator element_A[kThreadM];
ElementAccumulator element_B[kThreadN];
ElementAccumulator accum[kThreadM][kThreadN];
int64_t npq_start = int64_t(blockIdx.x) * kCtaShapeM * kThreadM + threadIdx.x * kThreadM;
int k_start = blockIdx.y * kCtaShapeN * kThreadN + threadIdx.y * kThreadN;
int thread_n[kThreadM];
int thread_p[kThreadM];
int thread_q[kThreadM];
// Compute N, P, Q coordinates for each row of a thread's tile
int64_t PQ = int64_t(problem_size.P) * problem_size.Q;
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int64_t npq = npq_start + m;
thread_n[m] = int(npq / PQ);
int64_t residual = npq % PQ;
thread_p[m] = int(residual / problem_size.Q);
thread_q[m] = int(residual % problem_size.Q);
}
// Clear accumulators
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = ElementAccumulator();
}
}
int c_per_group = problem_size.C / problem_size.groups;
int k_per_group = problem_size.K / problem_size.groups;
// Compute convolution
for (int R = 0; R < problem_size.R; ++R) {
for (int S = 0; S < problem_size.S; ++S) {
for (int C = 0; C < problem_size.C; ++C) {
        // Get group id of the current channel
int c_group_idx = C / c_per_group;
// Load from activations tensor
int filter_r = R;
int filter_s = S;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_r = problem_size.R - 1 - R;
filter_s = problem_size.S - 1 - S;
}
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int h = thread_p[m] * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h;
int w = thread_q[m] * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w;
if (thread_n[m] < problem_size.N && h >= 0 && h < problem_size.H && w >= 0 && w < problem_size.W) {
element_A[m] = ElementAccumulator(tensor_x.at({thread_n[m], h, w, C}));
}
else {
element_A[m] = ElementAccumulator();
}
}
// Load from filters tensor
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_k = k_start + n;
int k_group_idx = thread_k / k_per_group;
if (thread_k < problem_size.K && k_group_idx == c_group_idx) {
element_B[n] = ElementAccumulator(tensor_w.at({thread_k, R, S, C % c_per_group}));
}
else {
element_B[n] = ElementAccumulator();
}
}
// Accumulate matrix product
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]);
}
}
}
}
}
// Write out the results
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
if (thread_n[m] < problem_size.N && thread_p[m] < problem_size.P && thread_q[m] < problem_size.Q) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_k = k_start + n;
if (thread_k < problem_size.K) {
ElementCompute c_ref = ElementCompute();
if (beta != ElementCompute()) {
c_ref = ElementCompute(tensor_y_in.at({thread_n[m], thread_p[m], thread_q[m], thread_k}));
}
tensor_y_out.at({thread_n[m], thread_p[m], thread_q[m], thread_k}) = convert_op(
alpha * ElementCompute(accum[m][n]) + beta * c_ref);
}
}
}
}
}
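// Illustrative launch sketch (added for exposition; not part of the upstream header). It shows how
// a caller might size the grid so that each thread covers kThreadM output pixels along the
// flattened N*P*Q dimension and kThreadN filters along K; the helper name is hypothetical and the
// constants mirror the kernel's default template arguments.
template <
  typename ElementA, typename LayoutA,
  typename ElementB, typename LayoutB,
  typename ElementC, typename LayoutC,
  typename ElementCompute
>
void example_launch_conv2d_fprop(
  conv::Conv2dProblemSize problem_size,
  TensorRef<ElementA, LayoutA> tensor_x,
  TensorRef<ElementB, LayoutB> tensor_w,
  TensorRef<ElementC, LayoutC> tensor_y_in,
  TensorRef<ElementC, LayoutC> tensor_y_out,
  ElementCompute alpha,
  ElementCompute beta,
  cudaStream_t stream = nullptr) {
  int const kThreadM = 2, kThreadN = 4, kCtaShapeM = 16, kCtaShapeN = 8;
  int64_t npq = int64_t(problem_size.N) * problem_size.P * problem_size.Q;
  dim3 block(kCtaShapeM, kCtaShapeN);
  dim3 grid(
    unsigned((npq + int64_t(kCtaShapeM) * kThreadM - 1) / (int64_t(kCtaShapeM) * kThreadM)),
    unsigned((problem_size.K + kCtaShapeN * kThreadN - 1) / (kCtaShapeN * kThreadN)));
  Conv2dFprop<ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC, ElementCompute>
    <<< grid, block, 0, stream >>>(
      problem_size, tensor_x, tensor_w, tensor_y_in, tensor_y_out, alpha, beta);
}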
// Conv3d Fprop kernel - y = fprop(x, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>,
int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension
int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension
int kCtaShapeM = 16, // shape of a threadblock in units of threads
int kCtaShapeN = 8 // shape of a threadblock in units of threads
>
__global__ void Conv3dFprop(
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_x,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_y_in,
TensorRef<ElementC, LayoutC> tensor_y_out,
ElementCompute alpha,
ElementCompute beta
) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
ElementAccumulator element_A[kThreadM];
ElementAccumulator element_B[kThreadN];
ElementAccumulator accum[kThreadM][kThreadN];
int64_t nzpq_start = int64_t(blockIdx.x) * kCtaShapeM * kThreadM + threadIdx.x * kThreadM;
int k_start = blockIdx.y * kCtaShapeN * kThreadN + threadIdx.y * kThreadN;
int thread_n[kThreadM];
int thread_z[kThreadM];
int thread_p[kThreadM];
int thread_q[kThreadM];
// Compute N, Z, P, Q coordinates for each row of a thread's tile
int64_t PQ = int64_t(problem_size.P) * problem_size.Q;
int64_t ZPQ = PQ * problem_size.Z;
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int64_t nzpq = nzpq_start + m;
thread_n[m] = int(nzpq / ZPQ);
int64_t residual = nzpq % ZPQ;
thread_z[m] = int(residual / PQ);
residual = residual % PQ;
thread_p[m] = int(residual / problem_size.Q);
thread_q[m] = int(residual % problem_size.Q);
}
// Clear accumulators
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = ElementAccumulator();
}
}
// Compute convolution
for (int T = 0; T < problem_size.T; ++T) {
for (int R = 0; R < problem_size.R; ++R) {
for (int S = 0; S < problem_size.S; ++S) {
for (int C = 0; C < problem_size.C; ++C) {
// Load from activations tensor
int filter_t = T;
int filter_r = R;
int filter_s = S;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_t = problem_size.T - 1 - T;
filter_r = problem_size.R - 1 - R;
filter_s = problem_size.S - 1 - S;
}
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int d = thread_z[m] * problem_size.stride_d - problem_size.pad_d + filter_t * problem_size.dilation_d;
int h = thread_p[m] * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h;
int w = thread_q[m] * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w;
if (thread_n[m] < problem_size.N &&
d >= 0 && d < problem_size.D &&
h >= 0 && h < problem_size.H &&
w >= 0 && w < problem_size.W) {
element_A[m] = ElementAccumulator(tensor_x.at({thread_n[m], d, h, w, C}));
}
else {
element_A[m] = ElementAccumulator();
}
}
// Load from filters tensor
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_k = k_start + n;
if (thread_k < problem_size.K) {
element_B[n] = ElementAccumulator(tensor_w.at({thread_k, T, R, S, C}));
}
else {
element_B[n] = ElementAccumulator();
}
}
// Accumulate matrix product
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]);
}
}
} // for (C)
} // for (S)
} // for (R)
} // for (T)
// Write out the results
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
if (thread_n[m] < problem_size.N &&
thread_z[m] < problem_size.Z &&
thread_p[m] < problem_size.P &&
thread_q[m] < problem_size.Q) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_k = k_start + n;
if (thread_k < problem_size.K) {
ElementCompute c_ref = ElementCompute();
if (beta != ElementCompute()) {
c_ref = ElementCompute(tensor_y_in.at({thread_n[m], thread_z[m], thread_p[m], thread_q[m], thread_k}));
}
tensor_y_out.at({thread_n[m], thread_z[m], thread_p[m], thread_q[m], thread_k}) = convert_op(
alpha * ElementCompute(accum[m][n]) + beta * c_ref);
}
} // for (n)
}
} // for (m)
}
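// Illustrative sketch (added for exposition; not part of the upstream header). The 3-D kernels
// flatten the output coordinates (n, z, p, q) into one linear index and recover them with
// successive divisions, exactly as in the loop above; a hypothetical host-side equivalent:
inline void example_decompose_nzpq(
  conv::Conv3dProblemSize const &problem_size, int64_t nzpq,
  int &n, int &z, int &p, int &q) {
  int64_t PQ = int64_t(problem_size.P) * problem_size.Q;
  int64_t ZPQ = PQ * problem_size.Z;
  n = int(nzpq / ZPQ);
  int64_t residual = nzpq % ZPQ;
  z = int(residual / PQ);
  residual = residual % PQ;
  p = int(residual / problem_size.Q);
  q = int(residual % problem_size.Q);
}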
///////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2d dgrad kernel - dx = dgrad(dy, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>,
int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension
int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension
int kCtaShapeM = 16, // shape of a threadblock in units of threads
int kCtaShapeN = 8 // shape of a threadblock in units of threads
>
__global__ void Conv2dDgrad(
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_dx_in,
TensorRef<ElementC, LayoutC> tensor_dx_out,
ElementCompute alpha,
ElementCompute beta
) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
ElementAccumulator element_A[kThreadM];
ElementAccumulator element_B[kThreadN];
ElementAccumulator accum[kThreadM][kThreadN];
int64_t nhw_start = int64_t(blockIdx.x) * kCtaShapeM * kThreadM + threadIdx.x * kThreadM;
int c_start = blockIdx.y * kCtaShapeN * kThreadN + threadIdx.y * kThreadN;
int thread_n[kThreadM];
int thread_h[kThreadM];
int thread_w[kThreadM];
// Compute N, H, W coordinates for each row of a thread's tile
int64_t HW = int64_t(problem_size.H) * problem_size.W;
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int64_t nhw = nhw_start + m;
thread_n[m] = int(nhw / HW);
int64_t residual = nhw % HW;
thread_h[m] = int(residual / problem_size.W);
thread_w[m] = int(residual % problem_size.W);
}
// Clear accumulators
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = ElementAccumulator();
}
}
// Compute convolution
for (int R = 0; R < problem_size.R; ++R) {
for (int S = 0; S < problem_size.S; ++S) {
for (int K = 0; K < problem_size.K; ++K) {
        // Load from the output gradient tensor
int filter_r = R;
int filter_s = S;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_r = problem_size.R - 1 - R;
filter_s = problem_size.S - 1 - S;
}
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int p = thread_h[m] + problem_size.pad_h - filter_r * problem_size.dilation_h;
int q = thread_w[m] + problem_size.pad_w - filter_s * problem_size.dilation_w;
element_A[m] = ElementAccumulator();
if (p >= 0 && !(p % problem_size.stride_h) && q >= 0 && !(q % problem_size.stride_w)) {
p = p / problem_size.stride_h;
q = q / problem_size.stride_w;
if (thread_n[m] < problem_size.N && p < problem_size.P && q < problem_size.Q) {
element_A[m] = ElementAccumulator(tensor_dy.at({thread_n[m], p, q, K}));
}
}
}
// Load from filters tensor
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_c = c_start + n;
if (thread_c < problem_size.C) {
element_B[n] = ElementAccumulator(tensor_w.at({K, R, S, thread_c}));
}
else {
element_B[n] = ElementAccumulator();
}
}
// Accumulate matrix product
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]);
}
}
}
}
}
// Write out the results
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
if (thread_n[m] < problem_size.N && thread_h[m] < problem_size.H && thread_w[m] < problem_size.W) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_c = c_start + n;
if (thread_c < problem_size.C) {
ElementCompute c_ref = ElementCompute();
if (beta != ElementCompute()) {
c_ref = ElementCompute(tensor_dx_in.at({thread_n[m], thread_h[m], thread_w[m], thread_c}));
}
tensor_dx_out.at({thread_n[m], thread_h[m], thread_w[m], thread_c}) = convert_op(
alpha * ElementCompute(accum[m][n]) + beta * c_ref);
}
}
}
}
}
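// Illustrative sketch (added for exposition; not part of the upstream header). Dgrad only
// accumulates a contribution when (h + pad_h - r * dilation_h) is non-negative and divisible by
// stride_h (and likewise for the width dimension), which is what the checks above implement.
// A hypothetical scalar helper making that predicate explicit:
inline bool example_maps_to_output(
  int input_coord, int pad, int filter_coord, int dilation, int stride, int output_extent,
  int &output_coord) {
  output_coord = input_coord + pad - filter_coord * dilation;
  if (output_coord < 0 || (output_coord % stride)) {
    return false;
  }
  output_coord /= stride;
  return output_coord < output_extent;
}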
// Conv3d dgrad kernel - dx = dgrad(dy, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>,
int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension
int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension
int kCtaShapeM = 16, // shape of a threadblock in units of threads
int kCtaShapeN = 8 // shape of a threadblock in units of threads
>
__global__ void Conv3dDgrad(
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_dx_in,
TensorRef<ElementC, LayoutC> tensor_dx_out,
ElementCompute alpha,
ElementCompute beta
) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
ElementAccumulator element_A[kThreadM];
ElementAccumulator element_B[kThreadN];
ElementAccumulator accum[kThreadM][kThreadN];
int64_t ndhw_start = int64_t(blockIdx.x) * kCtaShapeM * kThreadM + threadIdx.x * kThreadM;
int c_start = blockIdx.y * kCtaShapeN * kThreadN + threadIdx.y * kThreadN;
int thread_n[kThreadM];
int thread_d[kThreadM];
int thread_h[kThreadM];
int thread_w[kThreadM];
  // Compute N, D, H, W coordinates for each row of a thread's tile
int64_t HW = int64_t(problem_size.H) * problem_size.W;
int64_t DHW = HW * problem_size.D;
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int64_t ndhw = ndhw_start + m;
thread_n[m] = int(ndhw / DHW);
int64_t residual = ndhw % DHW;
thread_d[m] = int(residual / HW);
residual = residual % HW;
thread_h[m] = int(residual / problem_size.W);
thread_w[m] = int(residual % problem_size.W);
}
// Clear accumulators
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = ElementAccumulator();
}
}
// Compute convolution
for (int T = 0; T < problem_size.T; ++T) {
for (int R = 0; R < problem_size.R; ++R) {
for (int S = 0; S < problem_size.S; ++S) {
for (int K = 0; K < problem_size.K; ++K) {
          // Load from the output gradient tensor
int filter_t = T;
int filter_r = R;
int filter_s = S;
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_t = problem_size.T - 1 - T;
filter_r = problem_size.R - 1 - R;
filter_s = problem_size.S - 1 - S;
}
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int z = thread_d[m] + problem_size.pad_d - filter_t * problem_size.dilation_d;
int p = thread_h[m] + problem_size.pad_h - filter_r * problem_size.dilation_h;
int q = thread_w[m] + problem_size.pad_w - filter_s * problem_size.dilation_w;
element_A[m] = ElementAccumulator();
if (z >= 0 && !(z % problem_size.stride_d) &&
p >= 0 && !(p % problem_size.stride_h) &&
q >= 0 && !(q % problem_size.stride_w)) {
z = z / problem_size.stride_d;
p = p / problem_size.stride_h;
q = q / problem_size.stride_w;
if (thread_n[m] < problem_size.N && z < problem_size.Z && p < problem_size.P && q < problem_size.Q) {
element_A[m] = ElementAccumulator(tensor_dy.at({thread_n[m], z, p, q, K}));
}
}
}
// Load from filters tensor
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_c = c_start + n;
if (thread_c < problem_size.C) {
element_B[n] = ElementAccumulator(tensor_w.at({K, T, R, S, thread_c}));
}
else {
element_B[n] = ElementAccumulator();
}
}
// Accumulate matrix product
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]);
}
}
} // for (C)
} // for (S)
} // for (R)
} // for (T)
// Write out the results
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
if (thread_n[m] < problem_size.N &&
thread_d[m] < problem_size.D &&
thread_h[m] < problem_size.H &&
thread_w[m] < problem_size.W) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int thread_c = c_start + n;
if (thread_c < problem_size.C) {
ElementCompute c_ref = ElementCompute();
if (beta != ElementCompute()) {
c_ref = ElementCompute(tensor_dx_in.at({thread_n[m], thread_d[m], thread_h[m], thread_w[m], thread_c}));
}
tensor_dx_out.at({thread_n[m], thread_d[m], thread_h[m], thread_w[m], thread_c}) = convert_op(
alpha * ElementCompute(accum[m][n]) + beta * c_ref);
}
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
// Conv2d wgrad kernel - dw = wgrad(dy, x)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>,
int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension
int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension
int kCtaShapeM = 8, // shape of a threadblock in units of threads
int kCtaShapeN = 16 // shape of a threadblock in units of threads
>
__global__ void Conv2dWgrad(
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_x,
TensorRef<ElementC, LayoutC> tensor_dw_in,
TensorRef<ElementC, LayoutC> tensor_dw_out,
ElementCompute alpha,
ElementCompute beta
) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
ElementAccumulator element_A[kThreadM];
ElementAccumulator element_B[kThreadN];
ElementAccumulator accum[kThreadM][kThreadN];
int k_start = blockIdx.x * kCtaShapeM * kThreadM + threadIdx.x * kThreadM;
int64_t rsc_start = int64_t(blockIdx.y) * kCtaShapeN * kThreadN + threadIdx.y * kThreadN;
int thread_r[kThreadN];
int thread_s[kThreadN];
int thread_c[kThreadN];
  // Compute R, S, C coordinates for each column of a thread's tile
int64_t SC = int64_t(problem_size.S) * problem_size.C;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int64_t rsc = rsc_start + n;
int64_t residual = rsc % SC;
thread_r[n] = int(rsc / SC);
thread_s[n] = int(residual / problem_size.C);
thread_c[n] = int(residual % problem_size.C);
}
// Clear accumulators
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = ElementAccumulator();
}
}
// Compute convolution
for (int N = 0; N < problem_size.N; ++N) {
for (int P = 0; P < problem_size.P; ++P) {
for (int Q = 0; Q < problem_size.Q; ++Q) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int thread_k = k_start + m;
element_A[m] = ElementAccumulator();
if (thread_k < problem_size.K) {
element_A[m] = ElementAccumulator(tensor_dy.at({N, P, Q, thread_k}));
}
}
        // Load from activations tensor
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
          // Map filter coordinates, accounting for convolution vs. cross-correlation
int filter_r = thread_r[n];
int filter_s = thread_s[n];
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_r = problem_size.R - 1 - filter_r;
filter_s = problem_size.S - 1 - filter_s;
}
int h = P * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h;
int w = Q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w;
element_B[n] = ElementAccumulator();
if (h >= 0 && h < problem_size.H && w >= 0 && w < problem_size.W && thread_c[n] < problem_size.C) {
element_B[n] = ElementAccumulator(tensor_x.at({N, h, w, thread_c[n]}));
}
}
// Accumulate matrix product
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]);
}
}
}
}
}
// Write out the results
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int thread_k = k_start + m;
if (thread_k < problem_size.K) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
if (thread_r[n] < problem_size.R && thread_s[n] < problem_size.S && thread_c[n] < problem_size.C) {
ElementCompute c_ref = ElementCompute();
if (beta != ElementCompute()) {
c_ref = ElementCompute(tensor_dw_in.at({thread_k, thread_r[n], thread_s[n], thread_c[n]}));
}
tensor_dw_out.at({thread_k, thread_r[n], thread_s[n], thread_c[n]}) = convert_op(
alpha * ElementCompute(accum[m][n]) + beta * c_ref);
}
}
}
}
}
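/*
  For reference, the kernel above evaluates the weight-gradient convolution as an implicit GEMM:

    dw(k, r, s, c) = sum over (n, p, q) of dy(n, p, q, k) * x(n, h, w, c)

  where
    h = p * stride_h - pad_h + r * dilation_h
    w = q * stride_w - pad_w + s * dilation_w

  That is, the GEMM M dimension is K, the GEMM N dimension is the flattened (R * S * C) filter
  extent, and the reduction runs over (N * P * Q). Out-of-bounds (h, w) positions contribute
  zero, and kConvolution mode flips the (r, s) filter coordinates before the mapping.
*/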
// Conv3d wgrad kernel - dw = wgrad(dy, x)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>,
int kThreadM = 2, // shape of a thread's tile in the GEMM M dimension
int kThreadN = 4, // shape of a thread's tile in the GEMM N dimension
int kCtaShapeM = 8, // shape of a threadblock in units of threads
int kCtaShapeN = 16 // shape of a threadblock in units of threads
>
__global__ void Conv3dWgrad(
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_x,
TensorRef<ElementC, LayoutC> tensor_dw_in,
TensorRef<ElementC, LayoutC> tensor_dw_out,
ElementCompute alpha,
ElementCompute beta
) {
ConvertOp convert_op;
InnerProductOp inner_product_op;
ElementAccumulator element_A[kThreadM];
ElementAccumulator element_B[kThreadN];
ElementAccumulator accum[kThreadM][kThreadN];
int k_start = blockIdx.x * kCtaShapeM * kThreadM + threadIdx.x * kThreadM;
int64_t trsc_start = int64_t(blockIdx.y) * kCtaShapeN * kThreadN + threadIdx.y * kThreadN;
int thread_t[kThreadN];
int thread_r[kThreadN];
int thread_s[kThreadN];
int thread_c[kThreadN];
  // Compute T, R, S, C coordinates for each row of a thread's tile
int64_t SC = int64_t(problem_size.S) * problem_size.C;
int64_t RSC = SC * problem_size.R;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
int64_t trsc = trsc_start + n;
thread_t[n] = int(trsc / RSC);
int64_t residual = trsc % RSC;
thread_r[n] = int(residual / SC);
residual = residual % SC;
thread_s[n] = int(residual / problem_size.C);
thread_c[n] = int(residual % problem_size.C);
}
// Clear accumulators
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = ElementAccumulator();
}
}
// Compute convolution
for (int N = 0; N < problem_size.N; ++N) {
for (int Z = 0; Z < problem_size.Z; ++Z) {
for (int P = 0; P < problem_size.P; ++P) {
for (int Q = 0; Q < problem_size.Q; ++Q) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int thread_k = k_start + m;
element_A[m] = ElementAccumulator();
if (thread_k < problem_size.K) {
element_A[m] = ElementAccumulator(tensor_dy.at({N, Z, P, Q, thread_k}));
}
}
          // Load from the activations tensor
          CUTLASS_PRAGMA_UNROLL
          for (int n = 0; n < kThreadN; ++n) {
            // Map filter coordinates, honoring convolution vs. cross-correlation mode
int filter_t = thread_t[n];
int filter_r = thread_r[n];
int filter_s = thread_s[n];
if (problem_size.mode == cutlass::conv::Mode::kConvolution) {
filter_t = problem_size.T - 1 - filter_t;
filter_r = problem_size.R - 1 - filter_r;
filter_s = problem_size.S - 1 - filter_s;
}
            int d = Z * problem_size.stride_d - problem_size.pad_d + filter_t * problem_size.dilation_d;
int h = P * problem_size.stride_h - problem_size.pad_h + filter_r * problem_size.dilation_h;
int w = Q * problem_size.stride_w - problem_size.pad_w + filter_s * problem_size.dilation_w;
element_B[n] = ElementAccumulator();
if (d >= 0 && d < problem_size.D &&
h >= 0 && h < problem_size.H &&
w >= 0 && w < problem_size.W &&
thread_c[n] < problem_size.C) {
element_B[n] = ElementAccumulator(tensor_x.at({N, d, h, w, thread_c[n]}));
}
}
// Accumulate matrix product
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
accum[m][n] = inner_product_op(element_A[m], element_B[n], accum[m][n]);
}
}
} // for (Q)
} // for (P)
} // for (Z)
} // for (N)
// Write out the results
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < kThreadM; ++m) {
int thread_k = k_start + m;
if (thread_k < problem_size.K) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < kThreadN; ++n) {
if (thread_t[n] < problem_size.T &&
thread_r[n] < problem_size.R &&
thread_s[n] < problem_size.S &&
thread_c[n] < problem_size.C) {
ElementCompute c_ref = ElementCompute();
if (beta != ElementCompute()) {
c_ref = ElementCompute(tensor_dw_in.at({thread_k, thread_t[n], thread_r[n], thread_s[n], thread_c[n]}));
}
tensor_dw_out.at({thread_k, thread_t[n], thread_r[n], thread_s[n], thread_c[n]}) = convert_op(
alpha * ElementCompute(accum[m][n]) + beta * c_ref);
}
}
}
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Conv2d Fprop dispatcher - y = fprop(x, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv2dFprop(
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_x,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_y_in,
TensorRef<ElementC, LayoutC> tensor_y_out,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
//
// Blocking factors improve performance of reference implementation
//
int const kThreadM = 4; // shape of a thread's tile in the GEMM M dimension
int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension
int const kCtaShapeM = 16; // shape of a threadblock in units of threads
int const kCtaShapeN = 8; // shape of a threadblock in units of threads
int64_t npq = int64_t(problem_size.N) * problem_size.P * problem_size.Q;
int64_t blocks_m = (npq + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM);
dim3 block(kCtaShapeM, kCtaShapeN);
dim3 grid(uint32_t(blocks_m), (problem_size.K + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN));
kernel::Conv2dFprop<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp,
kThreadM,
kThreadN,
kCtaShapeM,
kCtaShapeN
><<< grid, block, 0, stream >>>(
problem_size,
tensor_x,
tensor_w,
tensor_y_in,
tensor_y_out,
alpha,
beta
);
cudaError_t result = cudaPeekAtLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/// Conv3d Fprop dispatcher - y = fprop(x, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv3dFprop(
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_x,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_y_in,
TensorRef<ElementC, LayoutC> tensor_y_out,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
//
// Blocking factors improve performance of reference implementation
//
int const kThreadM = 4; // shape of a thread's tile in the GEMM M dimension
int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension
int const kCtaShapeM = 16; // shape of a threadblock in units of threads
int const kCtaShapeN = 8; // shape of a threadblock in units of threads
int64_t nzpq = int64_t(problem_size.N) * problem_size.Z * problem_size.P * problem_size.Q;
int64_t blocks_m = (nzpq + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM);
dim3 block(kCtaShapeM, kCtaShapeN);
dim3 grid(uint32_t(blocks_m), (problem_size.K + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN));
kernel::Conv3dFprop<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp,
kThreadM,
kThreadN,
kCtaShapeM,
kCtaShapeN
><<< grid, block, 0, stream >>>(
problem_size,
tensor_x,
tensor_w,
tensor_y_in,
tensor_y_out,
alpha,
beta
);
cudaError_t result = cudaPeekAtLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/// Conv2d Dgrad dispatcher - dx = dgrad(dy, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv2dDgrad(
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_dx_in,
TensorRef<ElementC, LayoutC> tensor_dx_out,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
//
// Blocking factors improve performance of reference implementation
//
int const kThreadM = 2; // shape of a thread's tile in the GEMM M dimension
int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension
int const kCtaShapeM = 16; // shape of a threadblock in units of threads
int const kCtaShapeN = 8; // shape of a threadblock in units of threads
int64_t nhw = int64_t(problem_size.N) * problem_size.H * problem_size.W;
int64_t blocks_m = (nhw + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM);
dim3 block(kCtaShapeM, kCtaShapeN);
dim3 grid(uint32_t(blocks_m), (problem_size.C + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN));
kernel::Conv2dDgrad<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp,
kThreadM,
kThreadN,
kCtaShapeM,
kCtaShapeN
><<< grid, block, 0, stream >>>(
problem_size,
tensor_dy,
tensor_w,
tensor_dx_in,
tensor_dx_out,
alpha,
beta
);
cudaError_t result = cudaPeekAtLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/// Conv3d Dgrad dispatcher - dx = dgrad(dy, w)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv3dDgrad(
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_w,
TensorRef<ElementC, LayoutC> tensor_dx_in,
TensorRef<ElementC, LayoutC> tensor_dx_out,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
//
// Blocking factors improve performance of reference implementation
//
int const kThreadM = 2; // shape of a thread's tile in the GEMM M dimension
int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension
int const kCtaShapeM = 16; // shape of a threadblock in units of threads
int const kCtaShapeN = 8; // shape of a threadblock in units of threads
int64_t ndhw = int64_t(problem_size.N) * problem_size.D * problem_size.H * problem_size.W;
int64_t blocks_m = (ndhw + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM);
dim3 block(kCtaShapeM, kCtaShapeN);
dim3 grid(uint32_t(blocks_m), (problem_size.C + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN));
kernel::Conv3dDgrad<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp,
kThreadM,
kThreadN,
kCtaShapeM,
kCtaShapeN
><<< grid, block, 0, stream >>>(
problem_size,
tensor_dy,
tensor_w,
tensor_dx_in,
tensor_dx_out,
alpha,
beta
);
cudaError_t result = cudaPeekAtLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/// Conv2d Wgrad dispatcher - dw = wgrad(dy, x)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv2dWgrad(
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_x,
TensorRef<ElementC, LayoutC> tensor_dw_in,
TensorRef<ElementC, LayoutC> tensor_dw_out,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
//
// Blocking factors improve performance of reference implementation
//
int const kThreadM = 2; // shape of a thread's tile in the GEMM M dimension
int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension
int const kCtaShapeM = 8; // shape of a threadblock in units of threads
int const kCtaShapeN = 16; // shape of a threadblock in units of threads
int64_t rsc = int64_t(problem_size.R) * problem_size.S * problem_size.C;
int64_t blocks_n = (rsc + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN);
dim3 block(kCtaShapeM, kCtaShapeN);
dim3 grid((problem_size.K + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM), uint32_t(blocks_n));
kernel::Conv2dWgrad<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp,
kThreadM,
kThreadN,
kCtaShapeM,
kCtaShapeN
><<< grid, block, 0, stream >>>(
problem_size,
tensor_dy,
tensor_x,
tensor_dw_in,
tensor_dw_out,
alpha,
beta
);
cudaError_t result = cudaPeekAtLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/// Conv3d Wgrad dispatcher - dw = wgrad(dy, x)
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv3dWgrad(
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_dy,
TensorRef<ElementB, LayoutB> tensor_x,
TensorRef<ElementC, LayoutC> tensor_dw_in,
TensorRef<ElementC, LayoutC> tensor_dw_out,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
//
// Blocking factors improve performance of reference implementation
//
int const kThreadM = 2; // shape of a thread's tile in the GEMM M dimension
int const kThreadN = 4; // shape of a thread's tile in the GEMM N dimension
int const kCtaShapeM = 8; // shape of a threadblock in units of threads
int const kCtaShapeN = 16; // shape of a threadblock in units of threads
int64_t trsc = int64_t(problem_size.T) * problem_size.R * problem_size.S * problem_size.C;
int64_t blocks_n = (trsc + (kCtaShapeN * kThreadN) - 1) / (kCtaShapeN * kThreadN);
dim3 block(kCtaShapeM, kCtaShapeN);
dim3 grid((problem_size.K + (kCtaShapeM * kThreadM) - 1) / (kCtaShapeM * kThreadM), uint32_t(blocks_n));
kernel::Conv3dWgrad<
ElementA,
LayoutA,
ElementB,
LayoutB,
ElementC,
LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp,
InnerProductOp,
kThreadM,
kThreadN,
kCtaShapeM,
kCtaShapeN
><<< grid, block, 0, stream >>>(
problem_size,
tensor_dy,
tensor_x,
tensor_dw_in,
tensor_dw_out,
alpha,
beta
);
cudaError_t result = cudaPeekAtLastError();
if (result != cudaSuccess) {
return Status::kErrorInternal;
}
return Status::kSuccess;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Generic 2D convolution targeting Conv2dFprop, Conv2dDgrad, and Conv2dWgrad.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv2d(
conv::Operator convolutional_operator,
conv::Conv2dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_A,
TensorRef<ElementB, LayoutB> tensor_B,
TensorRef<ElementC, LayoutC> tensor_C,
TensorRef<ElementC, LayoutC> tensor_D,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
switch (convolutional_operator) {
case conv::Operator::kFprop:
return Conv2dFprop<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream);
break;
case conv::Operator::kDgrad:
return Conv2dDgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream);
break;
case conv::Operator::kWgrad:
return Conv2dWgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream);
break;
default: break;
}
return Status::kErrorNotSupported;
}
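/*
  Illustrative usage sketch (editorial addition, not part of the original header). It shows one
  way the generic dispatcher above can be driven from host code for an fprop problem with float
  NHWC tensors. The problem dimensions, the use of cutlass::HostTensor from
  "cutlass/util/host_tensor.h", and the alpha/beta values are assumptions chosen for the example.

    cutlass::conv::Conv2dProblemSize problem_size(
        cutlass::Tensor4DCoord(1, 32, 32, 64),      // activations: N, H, W, C
        cutlass::Tensor4DCoord(128, 3, 3, 64),      // filters:     K, R, S, C
        cutlass::Tensor4DCoord(1, 1, 1, 1),         // padding
        cutlass::MatrixCoord(1, 1),                 // stride
        cutlass::MatrixCoord(1, 1),                 // dilation
        cutlass::conv::Mode::kCrossCorrelation);

    cutlass::HostTensor<float, cutlass::layout::TensorNHWC> tensor_x(cutlass::Tensor4DCoord(1, 32, 32, 64));
    cutlass::HostTensor<float, cutlass::layout::TensorNHWC> tensor_w(cutlass::Tensor4DCoord(128, 3, 3, 64));
    cutlass::HostTensor<float, cutlass::layout::TensorNHWC> tensor_y(
        cutlass::Tensor4DCoord(problem_size.N, problem_size.P, problem_size.Q, problem_size.K));

    cutlass::Status status = cutlass::reference::device::Conv2d<
        float, cutlass::layout::TensorNHWC,         // ElementA, LayoutA (activations)
        float, cutlass::layout::TensorNHWC,         // ElementB, LayoutB (filters)
        float, cutlass::layout::TensorNHWC,         // ElementC, LayoutC (output)
        float                                       // ElementCompute
    >(
        cutlass::conv::Operator::kFprop,
        problem_size,
        tensor_x.device_ref(),
        tensor_w.device_ref(),
        tensor_y.device_ref(),                      // C operand (read only when beta != 0)
        tensor_y.device_ref(),                      // D operand (written)
        1.0f,                                       // alpha
        0.0f);                                      // beta
*/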
/// Generic 3D convolution targeting Conv3dFprop, Conv3dDgrad, and Conv3dWgrad.
template <
typename ElementA,
typename LayoutA,
typename ElementB,
typename LayoutB,
typename ElementC,
typename LayoutC,
typename ElementCompute,
typename ElementAccumulator = ElementCompute,
typename ConvertOp = NumericConverter<ElementC, ElementCompute>,
typename InnerProductOp = multiply_add<ElementAccumulator>
>
Status Conv3d(
conv::Operator convolutional_operator,
conv::Conv3dProblemSize problem_size,
TensorRef<ElementA, LayoutA> tensor_A,
TensorRef<ElementB, LayoutB> tensor_B,
TensorRef<ElementC, LayoutC> tensor_C,
TensorRef<ElementC, LayoutC> tensor_D,
ElementCompute alpha,
ElementCompute beta,
cudaStream_t stream = nullptr) {
switch (convolutional_operator) {
case conv::Operator::kFprop:
return Conv3dFprop<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream);
case conv::Operator::kDgrad:
return Conv3dDgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream);
case conv::Operator::kWgrad:
return Conv3dWgrad<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementCompute,
ElementAccumulator,
ConvertOp, InnerProductOp
>(problem_size, tensor_A, tensor_B, tensor_C, tensor_D, alpha, beta, stream);
default: break;
}
return Status::kErrorNotSupported;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace device
} // namespace reference
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| 48,350 | C | 30.194194 | 116 | 0.606308 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/device/thread/gemm.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Reference implementation for thread-level GEMM in device-side code.
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
namespace cutlass {
namespace reference {
namespace device {
namespace thread {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Thread-level blocked general matrix product.
//
// Note, this is a reference implementation. Performance is not expected to approach peak.
//
template <
typename TensorRefA,
typename TensorRefB,
typename TensorRefC,
typename ScalarType,
typename AccumulatorType,
typename OutputTile,
typename InnerProductOp = multiply_add<AccumulatorType>,
typename ConvertOp = NumericConverter<typename TensorRefC::Element, ScalarType>
>
struct Gemm {
using ElementA = typename TensorRefA::Element;
using ElementB = typename TensorRefB::Element;
using ElementC = typename TensorRefC::Element;
//
// Data members
//
/// Tile for A operand
ElementA A_tile[OutputTile::kColumn];
/// Tile for B operand
ElementB B_tile[OutputTile::kRow];
/// Tile for Accumulator
AccumulatorType accum[OutputTile::kColumn][OutputTile::kRow];
//
// Methods
//
/// Constructor
CUTLASS_HOST_DEVICE
Gemm(AccumulatorType initial_accum = AccumulatorType(0)) {
// Clear fetch registers
for (int i = 0; i < OutputTile::kColumn; ++i) {
A_tile[i] = ElementA(0);
}
    for (int j = 0; j < OutputTile::kRow; ++j) {
B_tile[j] = ElementB(0);
}
// Clear accumulators
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < OutputTile::kColumn; ++j) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < OutputTile::kRow; ++i) {
accum[j][i] = initial_accum;
}
}
}
/// Computes a matrix product
CUTLASS_HOST_DEVICE
Gemm & multiply_add(
gemm::GemmCoord problem_size,
TensorRefA tensor_a,
TensorRefB tensor_b,
MatrixCoord output_coord = MatrixCoord()) {
InnerProductOp inner_product_op;
// Loop over the GEMM K dimension
CUTLASS_PRAGMA_NO_UNROLL
for (int k = 0; k < problem_size.k(); ++k) {
// Fetch a slice of the A matrix
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < OutputTile::kColumn; ++i) {
if (output_coord.row() + i < problem_size.m()) {
A_tile[i] = tensor_a.at(make_Coord(output_coord.row() + i, k));
}
}
// Fetch a slice of the B matrix
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < OutputTile::kRow; ++j) {
if (output_coord.column() + j < problem_size.n()) {
B_tile[j] = tensor_b.at(make_Coord(k, output_coord.column() + j));
}
}
// Compute an accumulated matrix product
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < OutputTile::kRow; ++j) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < OutputTile::kColumn; ++i) {
accum[j][i] = inner_product_op(A_tile[i], B_tile[j], accum[j][i]);
}
}
}
return *this;
}
/// Performs linear scaling of matrix product and updates output tensor
CUTLASS_HOST_DEVICE
Gemm & epilogue(
gemm::GemmCoord problem_size,
ScalarType alpha,
ScalarType beta,
TensorRefC tensor_c,
TensorRefC tensor_d,
MatrixCoord output_coord = MatrixCoord()) {
ConvertOp convert_op;
// Update the output tensor
for (int j = 0; j < OutputTile::kRow; ++j) {
for (int i = 0; i < OutputTile::kColumn; ++i) {
MatrixCoord coord = output_coord + MatrixCoord(i, j);
if (coord.row() < problem_size.m() && coord.column() < problem_size.n()) {
tensor_d.at(coord) = convert_op(
alpha * ScalarType(accum[j][i]) +
beta * ScalarType(tensor_c.at(coord))
);
}
}
}
return *this;
}
};
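/*
  Illustrative usage sketch (editorial addition, not part of the original header). A device-side
  caller composes the two methods above: multiply_add() accumulates one OutputTile-sized block of
  the product over the K dimension, and epilogue() applies alpha/beta scaling and writes D. The
  float/ColumnMajor types, the MatrixShape<4, 4> tile, and the ref_A/ref_B/ref_C/ref_D names are
  assumptions chosen for the example.

    using TensorRef  = cutlass::TensorRef<float, cutlass::layout::ColumnMajor>;
    using ThreadGemm = cutlass::reference::device::thread::Gemm<
        TensorRef, TensorRef, TensorRef,
        float,                      // ScalarType
        float,                      // AccumulatorType
        cutlass::MatrixShape<4, 4>  // OutputTile
    >;

    // ... inside a __global__ kernel, with output_coord mapping this thread to its tile:
    ThreadGemm gemm(0.0f);
    gemm.multiply_add(problem_size, ref_A, ref_B, output_coord);
    gemm.epilogue(problem_size, alpha, beta, ref_C, ref_D, output_coord);
*/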
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace thread
} // namespace device
} // namespace reference
} // namespace cutlass
| 5,872 | C | 30.406417 | 100 | 0.617677 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/device/kernel/tensor_foreach.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
#include "cutlass/subbyte_reference.h"
#include "cutlass/fast_math.h"
namespace cutlass {
namespace reference {
namespace device {
namespace kernel {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines several helpers
namespace detail {
/// Helper to perform for-each operation
template <typename Func, int Rank, int RankRemaining>
struct TensorForEachHelper {
/// Constructor for general rank
__inline__ __device__
TensorForEachHelper(Func &func, Coord<Rank> const &size, Coord<Rank> &coord, int64_t index) {
int64_t product = 1;
CUTLASS_PRAGMA_UNROLL
for (int i = Rank - RankRemaining; i < Rank; ++i) {
product *= size[i];
}
coord[Rank - 1 - RankRemaining] = index / product;
int64_t remaining = index % product;
TensorForEachHelper<Func, Rank, RankRemaining-1>(func, size, coord, remaining);
}
};
/// Helper to perform for-each operation
template <typename Func, int Rank>
struct TensorForEachHelper<Func, Rank, 0> {
  /// Constructor for fastest changing rank
__inline__ __device__
TensorForEachHelper(Func &func, Coord<Rank> const &size, Coord<Rank> &coord, int64_t index) {
coord[Rank - 1] = index;
if (coord < size) {
func(coord);
}
}
};
} // namespace detail
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel calls a functor for each element in a tensor's index space
template <typename Func, int Rank, typename Params>
__global__ void TensorForEach(Coord<Rank> size, Params params = Params()) {
Func func(params);
int64_t index = threadIdx.x + blockIdx.x * blockDim.x;
int64_t max_index = 1;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Rank; ++i) {
max_index *= size[i];
}
CUTLASS_PRAGMA_NO_UNROLL
while (index < max_index) {
Coord<Rank> coord;
detail::TensorForEachHelper<Func, Rank, Rank - 1>(func, size, coord, index);
index += blockDim.x * gridDim.x;
}
}
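/*
  Illustrative functor sketch (editorial addition, not part of the original header). TensorForEach
  expects Func to be constructible from a Params object and callable with a Coord<Rank>. The
  ClearNHWC functor, its Params layout, and the launch-time names (N, H, W, C, device_ptr, layout)
  are assumptions chosen for the example.

    struct ClearNHWC {
      struct Params {
        float *ptr;
        cutlass::layout::TensorNHWC layout;
      };
      Params params;

      CUTLASS_HOST_DEVICE
      ClearNHWC(Params const &params_): params(params_) { }

      CUTLASS_HOST_DEVICE
      void operator()(cutlass::Coord<4> const &coord) {
        // Map the logical NHWC coordinate to a linear offset and zero the element
        params.ptr[params.layout(cutlass::Tensor4DCoord(coord[0], coord[1], coord[2], coord[3]))] = 0.0f;
      }
    };

    // Host-side launch; the grid-stride loop inside the kernel covers the full index space
    ClearNHWC::Params params{device_ptr, layout};
    TensorForEach<ClearNHWC, 4, ClearNHWC::Params><<< 64, 128 >>>(cutlass::make_Coord(N, H, W, C), params);
*/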
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel calls a functor for each element along a tensor's diagonal
template <typename Func, int Rank, typename Params>
__global__ void TensorDiagonalForEach(Coord<Rank> size, Params params, int start, int end) {
Func func(params);
int64_t index = threadIdx.x + blockIdx.x * blockDim.x + start;
if (index < end) {
Coord<Rank> coord;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Rank; ++i) {
coord[i] = index;
}
func(coord);
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element, typename Func>
__global__ void BlockForEach(
Element *ptr,
size_t capacity,
typename Func::Params params) {
Func func(params);
size_t index = threadIdx.x + blockIdx.x * blockDim.x;
for (; index < capacity; index += blockDim.x * gridDim.x) {
ReferenceFactory<Element>::get(ptr, index) = func();
}
}
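/*
  Illustrative functor sketch (editorial addition, not part of the original header). BlockForEach
  expects Func to expose a nested Params type, be constructible from it, and produce one Element
  per call to operator()(). The ConstantFunc below and the device_ptr/capacity names are
  assumptions chosen for the example.

    struct ConstantFunc {
      struct Params {
        float value;
      };
      Params params;

      CUTLASS_HOST_DEVICE
      ConstantFunc(Params const &params_): params(params_) { }

      CUTLASS_HOST_DEVICE
      float operator()() {
        return params.value;
      }
    };

    // Fills `capacity` elements starting at `device_ptr` with 0.5f
    BlockForEach<float, ConstantFunc><<< 64, 128 >>>(device_ptr, capacity, ConstantFunc::Params{0.5f});
*/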
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace device
} // namespace reference
} // namespace cutlass
| 5,126 | C | 31.04375 | 100 | 0.608662 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/device/kernel/tensor_elementwise.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <curand_kernel.h>
#include "cutlass/cutlass.h"
namespace cutlass {
namespace reference {
namespace device {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to initialize tensor to uniform random distribution
template <typename T>
__global__ void TensorInitializeUniform(
Distribution dist, int64_t seed, int dim_contiguous, int dim_strided, T *tensor, int ldm) {
__shared__ curandState_t rng_state[1024];
uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x;
curand_init(seed, gtid, 0, &rng_state[threadIdx.x]);
int c_idx = blockIdx.x * blockDim.x + threadIdx.x;
int s_idx = blockIdx.y * blockDim.x;
tensor += s_idx * ldm + c_idx;
for (int s_offset = 0; s_offset < blockDim.x; ++s_offset, ++s_idx) {
if (s_idx < dim_strided && c_idx < dim_contiguous) {
double range = dist.uniform.max - dist.uniform.min;
double rnd = curand_uniform(&rng_state[threadIdx.x]);
rnd = dist.uniform.min + range * rnd;
// Random values are cast to integer after scaling by a power of two to facilitate error
// testing
if (dist.int_scale >= 0) {
rnd = double(int(rnd * double(1 << dist.int_scale)));
*tensor = T(rnd / double(1 << dist.int_scale));
} else {
*tensor = T(rnd);
}
tensor += ldm;
}
}
}
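/*
  Illustrative launch sketch (editorial addition, not part of the original header). Each block of
  the kernel above covers blockDim.x positions of the contiguous dimension (via blockIdx.x) and
  blockDim.x positions of the strided dimension (via blockIdx.y), so the grid is sized accordingly;
  blockDim.x must not exceed the 1024-entry RNG state array. The Distribution values and the
  seed/dim_contiguous/dim_strided/device_ptr/ldm names are assumptions chosen for the example.

    cutlass::Distribution dist;
    dist.uniform.min = -4.0;
    dist.uniform.max =  4.0;
    dist.int_scale   = -1;              // negative: keep real-valued output (no integer rounding)

    int const kBlock = 256;
    dim3 block(kBlock, 1, 1);
    dim3 grid((dim_contiguous + kBlock - 1) / kBlock,
              (dim_strided + kBlock - 1) / kBlock,
              1);

    TensorInitializeUniform<float><<< grid, block >>>(
        dist, seed, dim_contiguous, dim_strided, device_ptr, ldm);
*/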
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to initialize tensor to a Gaussian distribution
template <typename T>
__global__ void TensorInitializeGaussian(
Distribution dist, int64_t seed, int dim_contiguous, int dim_strided, T *tensor, int ldm) {
__shared__ curandState_t rng_state[1024];
uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x;
curand_init(seed, gtid, 0, &rng_state[threadIdx.x]);
int c_idx = blockIdx.x * blockDim.x + threadIdx.x;
int s_idx = blockIdx.y * blockDim.x;
tensor += s_idx * ldm + c_idx;
for (int s_offset = 0; s_offset < blockDim.x; ++s_offset, ++s_idx) {
if (s_idx < dim_strided && c_idx < dim_contiguous) {
// Random values are cast to integer after scaling by a power of two to facilitate error
// testing
double rnd = curand_normal(&rng_state[threadIdx.x]);
rnd = dist.gaussian.mean + dist.gaussian.stddev * rnd;
if (dist.int_scale >= 0) {
rnd = double(int(rnd * double(1 << dist.int_scale)));
*tensor = T(rnd / double(1 << dist.int_scale));
} else {
*tensor = T(rnd);
}
}
}
}
/// Kernel to initialize tensor to a linear combination of its coordinates
template <typename T>
__global__ void TensorInitializeLinear(
Distribution dist, int64_t seed, int dim_contiguous, int dim_strided, T *tensor, int ldm) {
__shared__ curandState_t rng_state[1024];
uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x;
curand_init(seed, gtid, 0, &rng_state[threadIdx.x]);
int c_idx = blockIdx.x * blockDim.x + threadIdx.x;
int s_idx = blockIdx.y * blockDim.x;
tensor += s_idx * ldm + c_idx;
for (int s_offset = 0; s_offset < blockDim.x; ++s_offset, ++s_idx) {
if (s_idx < dim_strided && c_idx < dim_contiguous) {
*tensor =
dist.linear.offset + dist.linear.delta_row * c_idx + dist.linear.delta_column * s_idx;
}
}
}
/// Kernel to initialize tensor to an identity matrix
template <typename T>
__global__ void TensorInitializeIdentity(
Distribution dist, int64_t seed, int dim_contiguous, int dim_strided, T *tensor, int ldm) {
__shared__ curandState_t rng_state[1024];
uint64_t gtid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x;
curand_init(seed, gtid, 0, &rng_state[threadIdx.x]);
int c_idx = blockIdx.x * blockDim.x + threadIdx.x;
int s_idx = blockIdx.y * blockDim.x;
tensor += s_idx * ldm + c_idx;
for (int s_offset = 0; s_offset < blockDim.x; ++s_offset, ++s_idx) {
if (s_idx < dim_strided && c_idx < dim_contiguous) {
*tensor = (c_idx == s_idx ? T(1) : T(0));
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace device
} // namespace reference
} // namespace cutlass
| 6,198 | C | 35.680473 | 100 | 0.620361 |
NVIDIA/warp/warp/native/cutlass/tools/util/include/cutlass/util/reference/device/kernel/gemm.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Reference implementation for GEMM in device-side code.
*/
#pragma once
#include "cutlass/coord.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/util/reference/device/thread/gemm.h"
namespace cutlass {
namespace reference {
namespace device {
namespace kernel {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a general matrix product among matrices (tensors of rank=2) pointed to by TensorRef
/// objects.
template <
typename TensorRefA,
typename TensorRefB,
typename TensorRefC,
typename ScalarType,
typename AccumulatorType,
typename OutputTile,
typename InnerProductOp,
typename ConvertOp
>
__global__ void Gemm(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRefA tensor_a,
TensorRefB tensor_b,
ScalarType beta,
TensorRefC tensor_c,
TensorRefC tensor_d,
AccumulatorType initial_accum) {
// Map each thread to a unique tile of the output matrix
MatrixCoord output_coord(
MatrixCoord::Index((threadIdx.x + blockIdx.x * blockDim.x) * OutputTile::kRow),
MatrixCoord::Index((threadIdx.y + blockIdx.y * blockDim.y) * OutputTile::kColumn)
);
// Compute the general matrix product
thread::Gemm<
TensorRefA,
TensorRefB,
TensorRefC,
ScalarType,
AccumulatorType,
OutputTile,
InnerProductOp,
ConvertOp
> gemm(initial_accum);
gemm.multiply_add(
problem_size,
tensor_a,
tensor_b,
output_coord);
gemm.epilogue(problem_size, alpha, beta, tensor_c, tensor_d, output_coord);
}
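/*
  Illustrative launch sketch (editorial addition, not part of the original header). Each thread of
  the kernel above computes one OutputTile of the product, so the grid covers
  ceil(M / (blockDim.x * kRow)) x ceil(N / (blockDim.y * kColumn)) thread tiles. The square 4x4
  tile, the float/ColumnMajor types, the 16x8 block shape, and the ref_A/ref_B/ref_C/ref_D names
  are assumptions chosen for the example.

    using TensorRef  = cutlass::TensorRef<float, cutlass::layout::ColumnMajor>;
    using OutputTile = cutlass::MatrixShape<4, 4>;

    dim3 block(16, 8);
    dim3 grid(
        (problem_size.m() + block.x * OutputTile::kRow - 1) / (block.x * OutputTile::kRow),
        (problem_size.n() + block.y * OutputTile::kColumn - 1) / (block.y * OutputTile::kColumn));

    Gemm<TensorRef, TensorRef, TensorRef, float, float, OutputTile,
         cutlass::multiply_add<float>, cutlass::NumericConverter<float, float>>
        <<< grid, block >>>(problem_size, alpha, ref_A, ref_B, beta, ref_C, ref_D, 0.0f);
*/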
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes a batch of general matrix products among matrices (tensors of rank=2) pointed to by
/// TensorRef collection objects. The batch index is taken from blockIdx.z.
template <
typename TensorRefCollectionA,
typename TensorRefCollectionB,
typename TensorRefCollectionC,
typename ScalarType,
typename AccumulatorType,
typename OutputTile,
typename InnerProductOp,
typename ConvertOp
>
__global__ void BatchedGemm(
gemm::GemmCoord problem_size,
ScalarType alpha,
TensorRefCollectionA tensor_collection_a,
TensorRefCollectionB tensor_collection_b,
ScalarType beta,
TensorRefCollectionC tensor_collection_c,
AccumulatorType initial_accum) {
// Obtain batch ID
int batch_id = blockIdx.z;
// Dereference based on batch_id
typename TensorRefCollectionA::TensorRef tensor_a = tensor_collection_a.at(batch_id);
typename TensorRefCollectionB::TensorRef tensor_b = tensor_collection_b.at(batch_id);
typename TensorRefCollectionC::TensorRef tensor_c = tensor_collection_c.at(batch_id);
// Map each thread to a unique tile of the output matrix
MatrixCoord output_coord(
(threadIdx.x + blockIdx.x * blockDim.x) * OutputTile::kColumn,
(threadIdx.y + blockIdx.y * blockDim.y) * OutputTile::kRow
);
// Compute the general matrix product
thread::Gemm<
typename TensorRefCollectionA::TensorRef,
typename TensorRefCollectionB::TensorRef,
typename TensorRefCollectionC::TensorRef,
ScalarType,
AccumulatorType,
OutputTile,
InnerProductOp,
ConvertOp
> gemm(initial_accum);
gemm.multiply_add(
problem_size,
tensor_a,
tensor_b,
output_coord);
  // Update tensor C in place (the batched collection carries a single C operand)
  gemm.epilogue(problem_size, alpha, beta, tensor_c, tensor_c, output_coord);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace kernel
} // namespace device
} // namespace reference
} // namespace cutlass
| 5,381 | C | 32.018405 | 100 | 0.675897 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/tensor_view_planar_complex.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines a structure containing strides and a pointer to tensor data.
TensorView is derived from TensorRef and contributes bounds to the tensor's index space. Thus,
it is a complete mathematical object and may be used in tensor algorithms. It is decoupled from
data storage and is therefore lightweight and may be embedded in larger tensor objects or
memory structures.
See cutlass/tensor_ref.h for more details about the mapping of the logical tensor index space to
linear memory.
*/
#pragma once
#if !defined(__CUDACC_RTC__)
#include <cmath>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/tensor_ref_planar_complex.h"
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Data type of element stored within tensor
typename Element_,
/// Maps a Coord<Rank_> in the logical tensor index space to the internal n-D array
typename Layout_
>
class TensorViewPlanarComplex : public TensorRefPlanarComplex<Element_, Layout_> {
public:
/// Base tensor reference
using Base = cutlass::TensorRefPlanarComplex<Element_, Layout_>;
/// Mapping function from logical coordinate to internal n-D array
using Layout = Layout_;
/// TensorRef pointing to constant memory
using ConstTensorRef = typename Base::ConstTensorRef;
/// Underlying TensorRef type
using TensorRef = Base;
/// Data type of individual access
using Element = Element_;
/// Reference type to an element
using Reference = Element &;
/// Logical rank of tensor index space
static int const kRank = Layout::kRank;
/// Index type
using Index = typename Layout::Index;
/// Long index used for pointer offsets
using LongIndex = typename Layout::LongIndex;
/// Coordinate in logical tensor space
using TensorCoord = typename Layout::TensorCoord;
/// Coordinate in storage n-D array
using Stride = typename Layout::Stride;
/// TensorView pointing to constant memory
using ConstTensorView = TensorViewPlanarComplex<
typename platform::remove_const<Element>::type const,
Layout>;
/// TensorView pointing to non-constant memory
using NonConstTensorView = TensorViewPlanarComplex<
typename platform::remove_const<Element>::type,
Layout>;
/// Require at least rank=1. Mathematically, a rank=0 tensor would be considered to be a
/// scalar, but degenerate cases such as these are difficult to accommodate without
/// extensive C++ metaprogramming or support for zero-length arrays.
static_assert(kRank > 0, "Cannot define a zero-rank TensorRef");
private:
/// View extent
TensorCoord extent_;
public:
//
// Methods
//
/// Constructs a TensorView object
CUTLASS_HOST_DEVICE
TensorViewPlanarComplex(TensorCoord const &extent = TensorCoord()): extent_(extent) {
}
/// Constructs a TensorView object
CUTLASS_HOST_DEVICE
TensorViewPlanarComplex(
Element *ptr, ///< pointer to start of tensor
Layout const &layout, ///< layout object containing stride and mapping function
LongIndex imaginary_stride, ///< stride between real and imaginary part
TensorCoord const &extent ///< size of the view in logical coordinates
):
Base(ptr, layout, imaginary_stride), extent_(extent) {
}
/// Constructs a TensorView object
CUTLASS_HOST_DEVICE
TensorViewPlanarComplex(
TensorRef const &ref, ///< pointer and layout object referencing a tensor
TensorCoord const &extent ///< logical size of tensor
):
Base(ref), extent_(extent) {
}
/// Converting constructor from TensorRef to non-constant data.
CUTLASS_HOST_DEVICE
TensorViewPlanarComplex(
NonConstTensorView const &view ///< TensorView to non-const data
):
Base(view), extent_(view.extent_) { }
/// Updates the pointer and layout object
CUTLASS_HOST_DEVICE
void reset(Element* ptr, Layout const &layout, LongIndex imaginary_stride, TensorCoord size) {
Base::reset(ptr, layout, imaginary_stride);
    this->resize(size);
}
/// Changes the size of the view without affecting pointer or layout
CUTLASS_HOST_DEVICE
void resize(TensorCoord extent) {
this->extent_ = extent;
}
/// Returns the extent of the view (the size along each logical dimension).
CUTLASS_HOST_DEVICE
TensorCoord const& extent() const { return extent_; }
/// Returns the extent along a particular logical dimension.
CUTLASS_HOST_DEVICE
Index extent(int dim) const { return extent_.at(dim); }
/// Determines whether a location is within a tensor
CUTLASS_HOST_DEVICE
bool contains(TensorCoord const& coord) const {
CUTLASS_PRAGMA_UNROLL
for (int dim = 0; dim < kRank; ++dim) {
if (!(coord[dim] >= 0 && coord[dim] < extent(dim))) {
return false;
}
}
return true;
}
/// Returns a TensorRef pointing to the first element of the tensor.
CUTLASS_HOST_DEVICE
Base ref() const {
return Base(this->data(), this->layout(), this->imaginary_stride());
}
/// Returns a TensorRef pointing to the first element of the tensor.
CUTLASS_HOST_DEVICE
ConstTensorRef const_ref() const {
return ConstTensorRef(this->data(), this->layout());
}
/// Returns a TensorView to const data
CUTLASS_HOST_DEVICE
ConstTensorView const_view() const {
return ConstTensorView(const_ref(), extent_);
}
  /// Returns a TensorView given location and size quantities
CUTLASS_HOST_DEVICE
TensorViewPlanarComplex subview(
TensorCoord extent, ///< extent of the resulting view
TensorCoord const& location = TensorCoord() ///< resulting view's origin within the old view
) const {
TensorViewPlanarComplex result(this->ref(), extent.clamp(extent_ - location));
result.add_coord_offset(location);
return result;
}
/// Returns the number of scalar elements needed to store tensor.
CUTLASS_HOST_DEVICE
size_t capacity() const {
return Base::layout().capacity(extent_);
}
/// Returns a TensorView offset by a given amount
CUTLASS_HOST_DEVICE
TensorViewPlanarComplex operator+(
TensorCoord const& b ///< offset in the logical coordinate space of the tensor
) const {
TensorViewPlanarComplex result(*this);
result.add_pointer_offset(this->offset(b));
return result;
}
/// Returns a TensorRef offset by a given amount
CUTLASS_HOST_DEVICE
TensorViewPlanarComplex& operator+=(
TensorCoord const& b ///< offset in the logical coordinate space of the tensor
) {
this->add_pointer_offset(this->offset(b));
return *this;
}
/// Returns a TensorRef offset by a given amount
CUTLASS_HOST_DEVICE
TensorViewPlanarComplex operator-(
TensorCoord const& b ///< offset in the logical coordinate space of the tensor
) const {
    TensorViewPlanarComplex result(*this);
result.add_pointer_offset(-this->offset(b));
return result;
}
/// Returns a TensorRef offset by a given amount
CUTLASS_HOST_DEVICE
TensorViewPlanarComplex& operator-=(
TensorCoord const& b ///< offset in the logical coordinate space of the tensor
) {
this->add_pointer_offset(-this->offset(b));
return *this;
}
/// TensorRef to real-valued tensor
CUTLASS_HOST_DEVICE
cutlass::TensorView<Element, Layout> view_real() const {
return cutlass::TensorView<Element, Layout>(this->data(), this->layout(), extent_);
}
/// TensorRef to real-valued tensor
CUTLASS_HOST_DEVICE
cutlass::TensorView<Element, Layout> view_imag() const {
return cutlass::TensorView<Element, Layout>(this->imaginary_data(), this->layout(), extent_);
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Constructs a TensorRef, deducing types from arguments.
template <
typename Element,
typename Layout
>
CUTLASS_HOST_DEVICE TensorViewPlanarComplex<Element, Layout> make_TensorViewPlanarComplex(
Element *ptr,
Layout const &layout,
typename Layout::LongIndex imaginary_stride,
typename Layout::TensorCoord const &extent) {
return TensorViewPlanarComplex<Element, Layout>(ptr, layout, imaginary_stride, extent);
}
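/*
  Illustrative usage sketch (editorial addition, not part of the original header). A planar-complex
  view wraps two equally sized real-valued planes: the real plane at the base pointer and the
  imaginary plane imaginary_stride elements later. The column-major 64x32 extent and the host-side
  std::vector storage below are assumptions chosen for the example.

    int rows = 64, columns = 32;
    std::vector<float> storage(2 * rows * columns);    // [real plane][imaginary plane]

    auto view = cutlass::make_TensorViewPlanarComplex(
        storage.data(),
        cutlass::layout::ColumnMajor(rows),            // leading dimension
        int64_t(rows) * columns,                       // imaginary_stride
        cutlass::MatrixCoord(rows, columns));          // extent

    view.at(cutlass::MatrixCoord(3, 5)) = cutlass::complex<float>(1.0f, -2.0f);
    cutlass::complex<float> value = view.at(cutlass::MatrixCoord(3, 5));
*/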
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| 10,250 | C | 32.943709 | 102 | 0.674537 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/tensor_ref_planar_complex.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines a structure containing strides, bounds, and a pointer to tensor data.
*/
#pragma once
#include <cstdint>
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/tensor_ref.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element_>
struct PlanarComplexReference {
//
// Type definitions
//
using Element = Element_;
using ComplexElement = complex<Element>;
//
// Data members
//
Element *real;
Element *imag;
//
// Methods
//
CUTLASS_HOST_DEVICE
PlanarComplexReference(
Element *real_ = nullptr,
Element *imag_ = nullptr
):
real(real_), imag(imag_) { }
/// Loads the complex element
CUTLASS_HOST_DEVICE
operator complex<Element>() const {
return complex<Element>{*real, *imag};
}
/// Stores a complex element to the location pointed to by the reference
CUTLASS_HOST_DEVICE
PlanarComplexReference &operator=(complex<Element> const &rhs) {
*real = rhs.real();
*imag = rhs.imag();
return *this;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
/* \brief TensorRef is a template for objects pointing to the start of tensors of arbitrary rank
and layout within memory. A TensorRef combines a pointer and a Layout concept
*/
template <
/// Data type of element stored within tensor (concept: NumericType)
typename Element_,
/// Defines a mapping from logical coordinate to linear memory (concept: Layout)
typename Layout_
>
class TensorRefPlanarComplex {
public:
/// Data type of individual access
using Element = Element_;
/// Complex element type
using ComplexElement = complex<Element>;
/// Mapping function from logical coordinate to linear memory
using Layout = Layout_;
static_assert(sizeof_bits<Element>::value >= 8,
"Planar complex not suitable for subbyte elements at this time");
/// Reference type to an element
using Reference = PlanarComplexReference<Element>;
/// Logical rank of tensor index space
static int const kRank = Layout::kRank;
/// Index type
using Index = typename Layout::Index;
/// Long index used for pointer offsets
using LongIndex = typename Layout::LongIndex;
/// Coordinate in logical tensor space
using TensorCoord = typename Layout::TensorCoord;
/// Layout's stride vector
using Stride = typename Layout::Stride;
/// TensorRef to constant data
using ConstTensorRef = TensorRefPlanarComplex<
typename platform::remove_const<Element>::type const,
Layout>;
/// TensorRef to non-constant data
using NonConstTensorRef = TensorRefPlanarComplex<
typename platform::remove_const<Element>::type,
Layout>;
/// Require at least rank=1. Mathematically, a rank=0 tensor would be considered to be a
/// scalar, but degenerate cases such as these are difficult to accommodate without
/// extensive C++ metaprogramming or support for zero-length arrays.
static_assert(kRank > 0, "Cannot define a zero-rank TensorRef");
private:
/// Pointer
Element* ptr_;
/// Layout object maps logical coordinates to linear offsets
Layout layout_;
/// Offset to imaginary part
LongIndex imaginary_stride_;
public:
//
// Methods
//
/// Constructs a TensorRef with a pointer and layout object.
CUTLASS_HOST_DEVICE
TensorRefPlanarComplex(
Element *ptr = nullptr, ///< pointer to start of tensor
Layout const &layout = Layout(), ///< layout object containing stride and mapping function
LongIndex imaginary_stride = 0
):
ptr_(ptr), layout_(layout), imaginary_stride_(imaginary_stride) {
}
/// Converting constructor from TensorRef to non-constant data.
CUTLASS_HOST_DEVICE
TensorRefPlanarComplex(
NonConstTensorRef const &ref ///< TensorRef to non-const data
):
ptr_(ref.data()), layout_(ref.layout()), imaginary_stride_(ref.imaginary_stride_) { }
/// Returns a reference to constant-valued tensor.
CUTLASS_HOST_DEVICE
ConstTensorRef const_ref() const {
return ConstTensorRef(ptr_, layout_, imaginary_stride_);
}
CUTLASS_HOST_DEVICE
NonConstTensorRef non_const_ref() const {
return NonConstTensorRef(
const_cast<typename platform::remove_const<Element>::type *>(ptr_),
layout_,
imaginary_stride_);
}
/// Updates only the pointer
CUTLASS_HOST_DEVICE
void reset(Element* ptr = nullptr, LongIndex imaginary_stride = 0) {
ptr_ = ptr;
imaginary_stride_ = imaginary_stride;
}
/// Updates the pointer and layout object
CUTLASS_HOST_DEVICE
void reset(Element* ptr, Layout const &layout, LongIndex imaginary_stride) {
ptr_ = ptr;
layout_ = layout;
imaginary_stride_ = imaginary_stride;
}
/// Returns true if the TensorRef is non-null
CUTLASS_HOST_DEVICE
bool good() const {
return ptr_ != nullptr;
}
/// Returns the pointer to referenced data
CUTLASS_HOST_DEVICE
Element * data() const { return ptr_; }
  /// Returns the pointer to the imaginary part of the referenced data
CUTLASS_HOST_DEVICE
Element * imaginary_data() const { return ptr_ + imaginary_stride_; }
/// Returns a reference to the element at a given linear index
CUTLASS_HOST_DEVICE
Reference data(LongIndex idx) const {
return Reference(ptr_ + idx, ptr_ + idx + imaginary_stride_);
}
/// Returns the layout object
CUTLASS_HOST_DEVICE
Layout & layout() {
return layout_;
}
/// Returns the layout object
CUTLASS_HOST_DEVICE
Layout layout() const {
return layout_;
}
  /// Gets the stride to an imaginary element
  CUTLASS_HOST_DEVICE
  LongIndex imaginary_stride() const {
    return imaginary_stride_;
  }
  /// Gets the stride to an imaginary element
  CUTLASS_HOST_DEVICE
  LongIndex &imaginary_stride() {
    return imaginary_stride_;
  }
/// Returns the layout object's stride vector
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the layout object's stride vector
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Returns the layout object's stride in a given physical dimension
CUTLASS_HOST_DEVICE
Index stride(int dim) const {
return layout_.stride().at(dim);
}
/// Returns the layout object's stride in a given physical dimension
CUTLASS_HOST_DEVICE
Index & stride(int dim) {
return layout_.stride().at(dim);
}
/// Computes the offset of an index from the origin of the tensor
CUTLASS_HOST_DEVICE
LongIndex offset(TensorCoord const& coord) const {
return layout_(coord);
}
/// Returns a reference to the element at a given Coord
CUTLASS_HOST_DEVICE
Reference at(TensorCoord const& coord) const {
return data(offset(coord));
}
/// Returns a reference to the element at a given Coord
CUTLASS_HOST_DEVICE
Reference operator[](TensorCoord const& coord) const {
return data(offset(coord));
}
/// Adds an offset to each pointer
CUTLASS_HOST_DEVICE
TensorRefPlanarComplex & add_pointer_offset(LongIndex offset_) {
ptr_ += offset_;
return *this;
}
/// Adds an offset to each pointer
CUTLASS_HOST_DEVICE
TensorRefPlanarComplex & add_coord_offset(TensorCoord const &coord) {
add_pointer_offset(offset(coord));
return *this;
}
/// Returns a TensorRef offset by a given amount
CUTLASS_HOST_DEVICE
TensorRefPlanarComplex operator+(TensorCoord const& b) const {
TensorRefPlanarComplex result(*this);
result.add_coord_offset(b);
return result;
}
/// Returns a TensorRef offset by a given amount
CUTLASS_HOST_DEVICE
TensorRefPlanarComplex & operator+=(TensorCoord const& b) {
add_coord_offset(b);
return *this;
}
/// Returns a TensorRef offset by a given amount
CUTLASS_HOST_DEVICE
TensorRefPlanarComplex operator-(TensorCoord const& b) const {
TensorRefPlanarComplex result(*this);
result.add_pointer_offset(-offset(b));
return result;
}
/// Returns a TensorRef offset by a given amount
CUTLASS_HOST_DEVICE
TensorRefPlanarComplex & operator-=(TensorCoord const& b) {
add_pointer_offset(-offset(b));
return *this;
}
/// TensorRef to real-valued tensor
CUTLASS_HOST_DEVICE
cutlass::TensorRef<Element, Layout> ref_real() const {
return cutlass::TensorRef<Element, Layout>(data(), layout());
}
  /// TensorRef to the imaginary-valued part of the tensor
CUTLASS_HOST_DEVICE
cutlass::TensorRef<Element, Layout> ref_imag() const {
return cutlass::TensorRef<Element, Layout>(imaginary_data(), layout());
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Constructs a TensorRefPlanarComplex, deducing types from arguments.
template <
typename Element,
typename Layout
>
CUTLASS_HOST_DEVICE
TensorRefPlanarComplex<Element, Layout> make_TensorRefPlanarComplex(
Element *ptr,
Layout const &layout,
int64_t imaginary_stride) {
return TensorRefPlanarComplex<Element, Layout>(ptr, layout, imaginary_stride);
}
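// Usage sketch (illustrative only): a minimal host-side example, assuming the column-major
// layout from "cutlass/layout/matrix.h". The real plane is stored first and the imaginary
// plane follows at an element offset of imaginary_stride:
//
//   float storage[2 * 8 * 4];                                   // real plane, then imaginary plane
//   cutlass::layout::ColumnMajor layout(8);                     // leading dimension of 8 rows
//   auto ref = cutlass::make_TensorRefPlanarComplex(storage, layout, 8 * 4);
//
//   auto idx = ref.offset({2, 3});                              // linear offset of element (2, 3)
//   ref.data()[idx]           = 1.0f;                           // real part
//   ref.imaginary_data()[idx] = 2.0f;                           // imaginary part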
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| 11,201 | C | 28.872 | 103 | 0.656816 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/tensor_coord.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines a canonical coordinate for rank=4 tensors offering named indices.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a canonical 4D coordinate used by tensor operations.
struct Tensor4DCoord : public Coord<4> {
/// Base class
using Base = Coord<4>;
/// Index type
using Index = typename Base::Index;
/// LongIndex type
using LongIndex = typename Base::LongIndex;
/// Batch dimension
static int const kN = 0;
/// Height dimension
static int const kH = 1;
/// Width dimension
static int const kW = 2;
/// Channels dimension
static int const kC = 3;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Tensor4DCoord() { }
/// Constructs from Coord<4>
CUTLASS_HOST_DEVICE
Tensor4DCoord(Coord<4> const &coord): Base(coord) { }
/// Helper to construct from N, H, W, and C.
CUTLASS_HOST_DEVICE
Tensor4DCoord(Index n, Index h, Index w, Index c): Base(make_Coord(n, h, w, c)) { }
  /// Helper to construct from N, H, W, and C, which are of LongIndex type
CUTLASS_HOST_DEVICE
Tensor4DCoord(LongIndex n, LongIndex h, LongIndex w, LongIndex c)
: Base(make_Coord(Index(n), Index(h), Index(w), Index(c))) { }
/// Returns the batch of the coordinate
CUTLASS_HOST_DEVICE
Index const & n() const { return this->at(kN); }
/// Returns the batch of the coordinate
CUTLASS_HOST_DEVICE
Index & n() { return this->at(kN); }
/// Returns the row of the coordinate
CUTLASS_HOST_DEVICE
Index const & h() const { return this->at(kH); }
/// Returns the row of the coordinate
CUTLASS_HOST_DEVICE
Index & h() { return this->at(kH); }
/// Returns the column of the coordinate
CUTLASS_HOST_DEVICE
Index const & w() const { return this->at(kW); }
/// Returns the column of the coordinate
CUTLASS_HOST_DEVICE
Index & w() { return this->at(kW); }
/// Returns the channel of the coordinate
CUTLASS_HOST_DEVICE
Index const & c() const { return this->at(kC); }
/// Returns the channel of the coordinate
CUTLASS_HOST_DEVICE
Index & c() { return this->at(kC); }
//
// Coord operators
//
/// Element-wise addition
CUTLASS_HOST_DEVICE
Tensor4DCoord operator+(Base const& b) const {
return Tensor4DCoord(Base::operator+(b));
}
/// Element-wise subtraction
CUTLASS_HOST_DEVICE
Tensor4DCoord operator-(Base const& b) const {
return Tensor4DCoord(Base::operator-(b));
}
/// Element-wise multiplication
CUTLASS_HOST_DEVICE
Tensor4DCoord operator*(Base const& b) const {
return Tensor4DCoord(Base::operator*(b));
}
/// Element-wise division
CUTLASS_HOST_DEVICE
Tensor4DCoord operator/(Base const& b) const {
return Tensor4DCoord(Base::operator/(b));
}
/// In-place addition
CUTLASS_HOST_DEVICE
Tensor4DCoord& operator+=(Base const& b) {
Base::operator+=(b);
return *this;
}
/// In-place subtraction
CUTLASS_HOST_DEVICE
Tensor4DCoord& operator-=(Base const& b) {
Base::operator-=(b);
return *this;
}
/// In-place multiplication
CUTLASS_HOST_DEVICE
Tensor4DCoord& operator*=(Base const& b) {
Base::operator*=(b);
return *this;
}
/// In-place division
CUTLASS_HOST_DEVICE
Tensor4DCoord& operator/=(Base const& b) {
Base::operator/=(b);
return *this;
}
};
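// Usage sketch (illustrative only): Tensor4DCoord names the N/H/W/C components of a
// Coord<4>, which is convenient when addressing NHWC activation tensors; the element-wise
// operators above act independently on the four components:
//
//   cutlass::Tensor4DCoord extent(2, 224, 224, 64);    // batch, height, width, channels
//   cutlass::Tensor4DCoord border(0, 1, 1, 0);
//   cutlass::Tensor4DCoord inner = extent - border;    // element-wise subtraction
//   int rows = inner.h();                              // 223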
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines a canonical 5D coordinate used by tensor operations.
struct Tensor5DCoord : public Coord<5> {
/// Base class
using Base = Coord<5>;
/// Index type
using Index = typename Base::Index;
/// LongIndex type
using LongIndex = typename Base::LongIndex;
/// Batch dimension
static int const kN = 0;
/// Depth dimension
static int const kD = 1;
/// Height dimension
static int const kH = 2;
/// Width dimension
static int const kW = 3;
/// Channels dimension
static int const kC = 4;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
Tensor5DCoord() { }
/// Constructs from Coord<5>
CUTLASS_HOST_DEVICE
Tensor5DCoord(Coord<5> const &coord): Base(coord) { }
/// Helper to construct from N, D, H, W, and C.
CUTLASS_HOST_DEVICE
Tensor5DCoord(Index n, Index d, Index h, Index w, Index c): Base(make_Coord(n, d, h, w, c)) { }
  /// Helper to construct from N, D, H, W, and C, which are of LongIndex type
CUTLASS_HOST_DEVICE
Tensor5DCoord(LongIndex n, LongIndex d, LongIndex h, LongIndex w, LongIndex c)
: Base(make_Coord(Index(n), Index(d), Index(h), Index(w), Index(c))) { }
/// Returns the batch of the coordinate
CUTLASS_HOST_DEVICE
Index const & n() const { return this->at(kN); }
/// Returns the batch of the coordinate
CUTLASS_HOST_DEVICE
Index & n() { return this->at(kN); }
  /// Returns the depth of the coordinate
  CUTLASS_HOST_DEVICE
  Index const & d() const { return this->at(kD); }
  /// Returns the depth of the coordinate
  CUTLASS_HOST_DEVICE
  Index & d() { return this->at(kD); }
/// Returns the row of the coordinate
CUTLASS_HOST_DEVICE
Index const & h() const { return this->at(kH); }
/// Returns the row of the coordinate
CUTLASS_HOST_DEVICE
Index & h() { return this->at(kH); }
/// Returns the column of the coordinate
CUTLASS_HOST_DEVICE
Index const & w() const { return this->at(kW); }
/// Returns the column of the coordinate
CUTLASS_HOST_DEVICE
Index & w() { return this->at(kW); }
/// Returns the channel of the coordinate
CUTLASS_HOST_DEVICE
Index const & c() const { return this->at(kC); }
/// Returns the channel of the coordinate
CUTLASS_HOST_DEVICE
Index & c() { return this->at(kC); }
//
// Coord operators
//
/// Element-wise addition
CUTLASS_HOST_DEVICE
Tensor5DCoord operator+(Base const& b) const {
return Tensor5DCoord(Base::operator+(b));
}
/// Element-wise subtraction
CUTLASS_HOST_DEVICE
Tensor5DCoord operator-(Base const& b) const {
return Tensor5DCoord(Base::operator-(b));
}
/// Element-wise multiplication
CUTLASS_HOST_DEVICE
Tensor5DCoord operator*(Base const& b) const {
return Tensor5DCoord(Base::operator*(b));
}
/// Element-wise division
CUTLASS_HOST_DEVICE
Tensor5DCoord operator/(Base const& b) const {
return Tensor5DCoord(Base::operator/(b));
}
/// In-place addition
CUTLASS_HOST_DEVICE
Tensor5DCoord& operator+=(Base const& b) {
Base::operator+=(b);
return *this;
}
/// In-place subtraction
CUTLASS_HOST_DEVICE
Tensor5DCoord& operator-=(Base const& b) {
Base::operator-=(b);
return *this;
}
/// In-place multiplication
CUTLASS_HOST_DEVICE
Tensor5DCoord& operator*=(Base const& b) {
Base::operator*=(b);
return *this;
}
/// In-place division
CUTLASS_HOST_DEVICE
Tensor5DCoord& operator/=(Base const& b) {
Base::operator/=(b);
return *this;
}
};
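// Usage sketch (illustrative only): Tensor5DCoord extends the same naming scheme to the
// NDHWC tensors used by 3-D convolution:
//
//   cutlass::Tensor5DCoord extent(1, 16, 56, 56, 32);   // batch, depth, height, width, channels
//   cutlass::Tensor5DCoord half_res = extent / cutlass::Tensor5DCoord(1, 2, 2, 2, 1);
//   // half_res.d() == 8, half_res.h() == 28, half_res.w() == 28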
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| 8,964 | C | 26.415902 | 100 | 0.641455 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/numeric_types.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Top-level include for all CUTLASS numeric types.
*/
#pragma once
#include "cutlass/cutlass.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines the size of an element in bits
template <typename T>
struct sizeof_bits {
static int const value = int(sizeof(T) * 8);
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Definitions for 1-bit binary and 4-bit integer types
//
/// 1-bit binary type
using bin1_t = bool;
/// Defines the size of an element in bits - specialized for bin1_t
template <>
struct sizeof_bits<bin1_t> {
static int const value = 1;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <size_t... Seq>
struct index_sequence;
template <size_t N, size_t... Next>
struct index_sequence_helper : index_sequence_helper<N - 1, N - 1, Next...> {};
template <size_t... Next>
struct index_sequence_helper<0, 0, Next...> {
using type = index_sequence<0, Next...>;
};
template <size_t N>
using make_index_sequence = typename index_sequence_helper<N>::type;
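// Compile-time usage sketch (illustrative only): sizeof_bits reports a type's width in bits
// and is specialized for sub-byte types, while make_index_sequence expands to the pack 0..N-1:
//
//   static_assert(cutlass::sizeof_bits<float>::value == 32, "float occupies 32 bits");
//   static_assert(cutlass::sizeof_bits<cutlass::bin1_t>::value == 1, "bin1_t occupies 1 bit");
//   using Seq = cutlass::make_index_sequence<4>;   // cutlass::index_sequence<0, 1, 2, 3>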
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/integer_subbyte.h"
#include "cutlass/half.h"
#include "cutlass/bfloat16.h"
#include "cutlass/tfloat32.h"
#include "cutlass/float8.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
| 3,505 | C | 35.905263 | 100 | 0.572896 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/complex.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#include <cuComplex.h>
#include <cuda_fp16.h>
#if defined(__CUDACC_RTC__)
#include <cuda/std/cstdint>
#else
#include <cstdint>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/functional.h"
#include "cutlass/half.h"
#include "cutlass/real.h"
#include "cutlass/bfloat16.h"
#include "cutlass/tfloat32.h"
#include "cutlass/fast_math.h"
#if !defined(__CUDACC_RTC__)
#include <iosfwd>
#endif
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Enumerated type describing a transformation on a complex value.
enum class ComplexTransform {
kNone,
kConjugate
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines ComplexTransform inversions
template <ComplexTransform kTransform>
struct InvertComplexTransform;
/// Invert ComplexTransform from kNone to kConjugate
template <>
struct InvertComplexTransform<ComplexTransform::kNone> {
static ComplexTransform const transform = ComplexTransform::kConjugate;
};
/// Invert ComplexTransform from kConjugate to kNone
template <>
struct InvertComplexTransform<ComplexTransform::kConjugate> {
static ComplexTransform const transform = ComplexTransform::kNone;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////
//
// Accessors for CUDA complex types
//
#if !defined(__CUDACC_RTC__)
/// Returns the real part of the complex number
CUTLASS_HOST_DEVICE
float const &real(cuFloatComplex const &z) { return z.x; }
/// Returns the real part of the complex number
CUTLASS_HOST_DEVICE
float &real(cuFloatComplex &z) { return z.x; }
/// Returns the real part of the complex number
CUTLASS_HOST_DEVICE
double const &real(cuDoubleComplex const &z) { return z.x; }
/// Returns the real part of the complex number
CUTLASS_HOST_DEVICE
double &real(cuDoubleComplex &z) { return z.x; }
/// Returns the imaginary part of the complex number
CUTLASS_HOST_DEVICE
float const &imag(cuFloatComplex const &z) { return z.y; }
/// Returns the imaginary part of the complex number
CUTLASS_HOST_DEVICE
float &imag(cuFloatComplex &z) { return z.y; }
/// Returns the imaginary part of the complex number
CUTLASS_HOST_DEVICE
double const &imag(cuDoubleComplex const &z) { return z.y; }
/// Returns the imaginary part of the complex number
CUTLASS_HOST_DEVICE
double &imag(cuDoubleComplex &z) { return z.y; }
#endif
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Class for representing and manipulating complex numbers with conversions from built-in CUDA
/// complex types.
template <typename T>
class complex
{
public:
/// Type alias for scalar type
using value_type = T;
private:
//
// Data members
//
/// Real part
T _real;
/// Imaginary part
T _imag;
public:
//
// Methods
//
/// Default constructor
complex() = default;
/// Constructor
CUTLASS_HOST_DEVICE
complex(T r) : _real(r), _imag(T(0)) {}
/// Constructor
CUTLASS_HOST_DEVICE
complex(T r, T i) : _real(r), _imag(i) {}
/// Constructor
template<typename A>
CUTLASS_HOST_DEVICE
complex(complex<A> const &z) : _real(static_cast<T>(z.real())), _imag(static_cast<T>(z.imag())) {}
#if !defined(__CUDACC_RTC__)
/// Conversion from cuFloatComplex
CUTLASS_HOST_DEVICE
complex(cuFloatComplex const &z) : _real(static_cast<T>(cuCrealf(z))), _imag(static_cast<T>(cuCimagf(z))) {}
/// Conversion from cuDoubleComplex
CUTLASS_HOST_DEVICE
complex(cuDoubleComplex const &z) : _real(static_cast<T>(cuCreal(z))), _imag(static_cast<T>(cuCimag(z))) {}
#endif
/// Assignment
template<typename A>
CUTLASS_HOST_DEVICE
complex<T>& operator=(complex<A> const &z)
{
_real = static_cast<T>(z.real());
_imag = static_cast<T>(z.imag());
return *this;
}
/// Equality operator
CUTLASS_HOST_DEVICE bool operator==(complex<T> const &rhs) const {
return this->real() == rhs.real() && this->imag() == rhs.imag();
}
/// Inequality operator
CUTLASS_HOST_DEVICE bool operator!=(complex<T> const &rhs) const {
return !(*this == rhs);
}
/// Addition
template <typename A>
CUTLASS_HOST_DEVICE complex<T> operator+(complex<A> const &rhs) const {
return complex<T>(this->real() + rhs.real(), this->imag() + rhs.imag());
}
/// Reduction into memory address. Components may update out of order.
template <typename OtherT>
CUTLASS_DEVICE void red(complex<OtherT> *ptr) const {
static_assert(platform::is_same<T, OtherT>::value, "Component type must match");
cutlass::red<T> reduce;
reduce(&ptr->_real, _real);
reduce(&ptr->_imag, _imag);
}
/// Reduction into memory address. Components may update out of order. (Half specialization)
CUTLASS_DEVICE void red(complex<half_t> *ptr) const {
static_assert(platform::is_same<T, half_t>::value, "Component type must match");
half2 *h2_ptr = reinterpret_cast<half2*>(ptr);
half2 h2_data = reinterpret_cast<half2&>(*this);
cutlass::red<half2> reduce;
reduce(h2_ptr, h2_data);
}
/// Subtraction
template <typename A>
CUTLASS_HOST_DEVICE complex<T> operator-(complex<A> const &rhs) const {
return complex<T>(this->real() - rhs.real(), this->imag() - rhs.imag());
}
/// Multiplication
template <typename A>
CUTLASS_HOST_DEVICE complex<T> operator*(complex<A> const &rhs) const {
return complex<T>(this->real() * rhs.real() - this->imag() * rhs.imag(),
this->real() * rhs.imag() + this->imag() * rhs.real());
}
/// Scalar Multiplication
template <typename A>
CUTLASS_HOST_DEVICE complex<T> operator*(A const &s) const {
return complex<T>(this->real() * s, this->imag() * s);
}
/// Division
template <typename A>
CUTLASS_HOST_DEVICE complex<T> operator/(complex<A> const &rhs) const {
T d = T(rhs.real() * rhs.real() + rhs.imag() * rhs.imag());
return complex<T>(
(real() * rhs.real() + imag() * rhs.imag()) / d,
(imag() * rhs.real() - real() * rhs.imag()) / d
);
}
/// Scalar Division
template <typename A>
CUTLASS_HOST_DEVICE complex<T> operator/(A const &s) const {
return complex<T>(this->real() / s, this->imag() / s);
}
/// Addition
template <typename A>
CUTLASS_HOST_DEVICE complex<T> &operator+=(complex<A> const &rhs) {
*this = *this + rhs;
return *this;
}
/// Subtraction
template <typename A>
CUTLASS_HOST_DEVICE complex<T> &operator-=(complex<A> const &rhs) {
*this = *this - rhs;
return *this;
}
/// Multiplication
template <typename A>
CUTLASS_HOST_DEVICE complex<T> &operator*=(complex<A> const &rhs) {
*this = *this * rhs;
return *this;
}
/// Scalar multiplication
template <typename A>
CUTLASS_HOST_DEVICE complex<T> &operator*=(A s) {
*this = *this * s;
return *this;
}
/// Division
template <typename A>
CUTLASS_HOST_DEVICE complex<T> &operator/=(complex<A> const &rhs) {
*this = *this / rhs;
return *this;
}
/// Accesses the real part of the complex number
CUTLASS_HOST_DEVICE
T const &real() const { return _real; }
/// Accesses the real part of the complex number
CUTLASS_HOST_DEVICE
T &real() { return _real; }
/// Accesses the imaginary part of the complex number
CUTLASS_HOST_DEVICE
T const &imag() const { return _imag; }
/// Accesses the imaginary part of the complex number
CUTLASS_HOST_DEVICE
T &imag() { return _imag; }
#if !defined(__CUDACC_RTC__)
/// Converts to cuFloatComplex
CUTLASS_HOST_DEVICE
explicit operator cuFloatComplex() const { return make_cuFloatComplex(float(real()), float(imag())); }
/// Converts to cuDoubleComplex
CUTLASS_HOST_DEVICE
explicit operator cuDoubleComplex() const { return make_cuDoubleComplex(real(), imag()); }
#endif
};
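// Usage sketch (illustrative only): cutlass::complex mirrors std::complex but is usable in
// both host and device code; arithmetic and the real()/imag() accessors behave as shown above:
//
//   cutlass::complex<float> a(1.0f, 2.0f);
//   cutlass::complex<float> b(3.0f, -1.0f);
//   cutlass::complex<float> c = a * b;      // (1*3 - 2*(-1), 1*(-1) + 2*3) = (5, 5)
//   float re = c.real();                    // 5.0f
//   c *= 2.0f;                              // scalar multiply: (10, 10)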
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Accessors for complex template
//
/// Returns the real part of the complex number
template <typename T>
CUTLASS_HOST_DEVICE T const &real(complex<T> const &z) {
return z.real();
}
/// Returns the real part of the complex number
template <typename T>
CUTLASS_HOST_DEVICE T &real(complex<T> &z) {
return z.real();
}
/// Returns the imaginary part of the complex number
template <typename T>
CUTLASS_HOST_DEVICE T const &imag(complex<T> const &z) {
return z.imag();
}
/// Returns the imaginary part of the complex number
template <typename T>
CUTLASS_HOST_DEVICE T &imag(complex<T> &z) {
return z.imag();
}
/// Returns the real part of the real number
template <typename T>
CUTLASS_HOST_DEVICE T const &real(T const &r) {
return r;
}
/// Returns the real part of the real number
template <typename T>
CUTLASS_HOST_DEVICE T &real(T &r) {
return r;
}
/// Returns the imaginary part of the real number
template <typename T>
CUTLASS_HOST_DEVICE T const &imag(T const &r) {
return T();
}
/// Returns the imaginary part of the real number
template <typename T>
CUTLASS_HOST_DEVICE T &imag(T &r) {
return T();
}
//
// Output operators
//
#if !defined(__CUDACC_RTC__)
template <typename T>
std::ostream &operator<<(std::ostream &out, complex<T> const &z) {
T _r = real(z);
T _i = imag(z);
if (bool(_i)) {
return out << _r << "+i" << _i;
}
return out << _r;
}
#endif
//
// Non-member operators defined for complex types
//
//
// Non-member functions defined for complex numbers
//
/// Returns the magnitude of the complex number
template <typename T>
CUTLASS_HOST_DEVICE T abs(complex<T> const &z) {
return sqrt(norm(z));
}
/// Returns the phase angle (argument) of the complex number
template <typename T>
CUTLASS_HOST_DEVICE T arg(complex<T> const &z) {
return atan2(imag(z), real(z));
}
/// Returns the squared magnitude of a real number
template <typename T>
CUTLASS_HOST_DEVICE T norm(T const &z) {
return z * z;
}
/// Returns the squared magnitude of a real number
template <>
CUTLASS_HOST_DEVICE int8_t norm(int8_t const &z) {
return static_cast<int8_t>(z * z);
}
/// Returns the squared magnitude of a complex number
template <typename T>
CUTLASS_HOST_DEVICE double norm(complex<T> const &z) {
return real(z) * real(z) + imag(z) * imag(z);
}
/// Norm-accumulate calculation
template <typename T, typename R>
CUTLASS_HOST_DEVICE R norm_accumulate(T const &x, R const & accumulator) {
return accumulator + static_cast<R>(x) * static_cast<R>(x);
}
/// Norm accumulate specialized for complex types
template <typename T, typename R>
CUTLASS_HOST_DEVICE R norm_accumulate(complex<T> const &z, R const &accumulator) {
return accumulator + static_cast<R>(real(z)) * static_cast<R>(real(z)) +
static_cast<R>(imag(z)) * static_cast<R>(imag(z));
}
/// Returns the complex conjugate
CUTLASS_HOST_DEVICE float conj(float const &z) {
return z;
}
/// Returns the complex conjugate
CUTLASS_HOST_DEVICE double conj(double const &z) {
return z;
}
/// Returns the complex conjugate
template <typename T>
CUTLASS_HOST_DEVICE complex<T> conj(complex<T> const &z) {
return complex<T>(real(z), -imag(z));
}
/// Identity transform for non-complex types
template <typename T>
CUTLASS_HOST_DEVICE T conj(T const &z) {
static_assert( !platform::is_same<T, cuComplex>::value &&
!platform::is_same<T, cuDoubleComplex>::value &&
!platform::is_same<T, cutlass::complex<double>>::value &&
!platform::is_same<T, cutlass::complex<float>>::value, "May not be a complex data type");
return z;
}
/// Projects the complex number z onto the Riemann sphere
template <typename T>
CUTLASS_HOST_DEVICE complex<T> proj(complex<T> const &z) {
T d = real(z) * real(z) + imag(z) * imag(z) + T(1);
return complex<T>((T(2) * real(z)) / d, (T(2) * imag(z)) / d);
}
/// Returns a complex number with magnitude r and phase theta
template <typename T>
CUTLASS_HOST_DEVICE complex<T> polar(T const &r, T const &theta = T()) {
return complex<T>(r * cos(theta), r * sin(theta));
}
/// Computes the complex exponential of z.
template <typename T>
CUTLASS_HOST_DEVICE complex<T> exp(complex<T> const &z) {
return complex<T>(fast_exp(real(z)) * fast_cos(imag(z)), fast_exp(real(z)) * fast_sin(imag(z)));
}
/// Computes the log of z
template <typename T>
CUTLASS_HOST_DEVICE complex<T> log(complex<T> const &z) {
return complex<T>(log(abs(z)), arg(z));
}
/// Computes the log base 10 of z
template <typename T>
CUTLASS_HOST_DEVICE complex<T> log10(complex<T> const &z) {
return log(z) / T(log(T(10)));
}
/// Computes the square root of complex number z
template <typename T>
CUTLASS_HOST_DEVICE complex<T> sqrt(complex<T> const &z) {
return sqrt(T(2)) / T(2) *
complex<T>(sqrt(sqrt(norm(z)) + real(z)),
(imag(z) < 0 ? T(-1) : T(1)) * sqrt(sqrt(norm(z)) - real(z)));
}
/// Computes the cosine of complex z.
template <typename T>
CUTLASS_HOST_DEVICE complex<T> cos(complex<T> const &z) {
return (exp(z) + exp(-z)) / T(2);
}
/// Computes the sin of complex z.
template <typename T>
CUTLASS_HOST_DEVICE complex<T> sin(complex<T> const &z) {
return (exp(-z) - exp(z)) * complex<T>(T(0), T(1) / T(2));
}
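// Usage sketch (illustrative only): the transcendental helpers follow the familiar
// identities; note that exp() is built on the fast-math routines, so results are
// approximate rather than correctly rounded:
//
//   float theta = 0.5f;
//   cutlass::complex<float> z = cutlass::exp(cutlass::complex<float>(0.0f, theta));
//   float magnitude = cutlass::abs(z);               // ~1.0f
//   float phase     = cutlass::arg(z);               // ~0.5f
//   cutlass::complex<float> zc = cutlass::conj(z);   // negates the imaginary part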
/// Comparison
template <typename T>
CUTLASS_HOST_DEVICE bool operator<(complex<T> const &lhs, complex<T> const &rhs) {
//TODO
return true;
}
//////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex-valued type.
template <typename T>
struct RealType< complex<T> >
{
using Type = T;
/// Number of elements
static int const kExtent = 2;
CUTLASS_HOST_DEVICE
static complex<T> from_real(double x) {
return complex<T>(static_cast<T>(x));
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
CUTLASS_HOST_DEVICE
cutlass::complex<half_t> from_real<cutlass::complex<half_t> >(double r) {
return cutlass::complex<half_t>(half_t(r));
}
template <>
CUTLASS_HOST_DEVICE
cutlass::complex<float> from_real<cutlass::complex<float> >(double r) {
return cutlass::complex<float>(float(r));
}
template <>
CUTLASS_HOST_DEVICE
cutlass::complex<double> from_real<cutlass::complex<double> >(double r) {
return cutlass::complex<double>(r);
}
//////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
struct is_complex {
static bool const value = false;
};
template <typename T>
struct is_complex<complex<T>> {
static bool const value = true;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// functional.h numeric specializations
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Squares with optional conversion
template <typename T, typename Output>
struct magnitude_squared<complex<T>, Output> {
CUTLASS_HOST_DEVICE
Output operator()(complex<T> lhs) const {
multiplies<Output> mul_op;
Output y_r = Output(lhs.real());
Output y_i = Output(lhs.imag());
return mul_op(y_r, y_r) + mul_op(y_i, y_i);
}
};
/// Fused multiply-add
template <typename T>
struct multiply_add<complex<T>, complex<T>, complex<T>> {
CUTLASS_HOST_DEVICE
complex<T> operator()(
complex<T> const &a,
complex<T> const &b,
complex<T> const &c) const {
T real = c.real();
T imag = c.imag();
real += a.real() * b.real();
real += -a.imag() * b.imag();
imag += a.real() * b.imag();
imag += a.imag () * b.real();
return complex<T>{
real,
imag
};
}
};
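// Usage sketch (illustrative only): the functional specializations let generic mainloop and
// epilogue code issue complex fused multiply-adds through the same functor interface used
// for real-valued types:
//
//   cutlass::multiply_add<cutlass::complex<float>, cutlass::complex<float>,
//                         cutlass::complex<float>> fma_op;
//   cutlass::complex<float> a(1.0f, 1.0f), b(2.0f, 3.0f), c(0.5f, 0.5f);
//   cutlass::complex<float> d = fma_op(a, b, c);   // a * b + c = (-0.5f, 5.5f)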
/// Fused multiply-add
template <typename T>
struct multiply_add<complex<T>, T, complex<T>> {
CUTLASS_HOST_DEVICE
complex<T> operator()(
complex<T> const &a,
T const &b,
complex<T> const &c) const {
T real = c.real();
T imag = c.imag();
real += a.real() * b;
imag += a.imag () * b;
return complex<T>{
real,
imag
};
}
};
/// Fused multiply-add
template <typename T>
struct multiply_add<T, complex<T>, complex<T>> {
CUTLASS_HOST_DEVICE
complex<T> operator()(
T const &a,
complex<T> const &b,
complex<T> const &c) const {
T real = c.real();
T imag = c.imag();
real += a * b.real();
imag += a * b.imag();
return complex<T>{
real,
imag
};
}
};
/// Conjugate
template <typename T>
struct conjugate<complex<T>> {
CUTLASS_HOST_DEVICE
complex<T> operator()(complex<T> const &a) const {
return conj(a);
}
};
/// Computes the square of a difference with optional conversion
template <typename T, typename Output>
struct magnitude_squared_difference<complex<T>, Output> {
CUTLASS_HOST_DEVICE
Output operator()(complex<T> lhs, complex<T> rhs) const {
multiplies<Output> mul_op;
Output y_r = Output(lhs.real()) - Output(rhs.real());
Output y_i = Output(lhs.imag()) - Output(rhs.imag());
return mul_op(y_r, y_r) + mul_op(y_i, y_i);
}
};
/// Reduces value into the data pointed to by ptr (complex<T> specialization)
template <typename T>
struct red<complex<T>> {
CUTLASS_DEVICE
void operator()(complex<T> *ptr, const complex<T> &data)
{
data.red(ptr);
}
};
//////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
//////////////////////////////////////////////////////////////////////////////////////////////////
| 19,422 | C | 26.511331 | 110 | 0.617135 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/half.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Defines a class for using IEEE half-precision floating-point types in host or
device code.
*/
#pragma once
#ifndef CUTLASS_ENABLE_F16C
#define CUTLASS_ENABLE_F16C 0
#endif
#if defined(__CUDACC_RTC__)
#include "cutlass/floating_point_nvrtc.h"
// F16C extensions are not meaningful when compiling for NVRTC which only accommodates device code.
#undef CUTLASS_ENABLE_F16C
#define CUTLASS_ENABLE_F16C 0
#else
//
// Standard Library headers belong here to avoid conflicts with NVRTC.
//
#include <cmath>
#include <limits>
#include <cstdint>
#include <cstring>
#endif
///////////////////////////////////////////////////////////////////////////////////////////////////
#include <cuda_fp16.h>
#include "cutlass/cutlass.h"
#include "cutlass/float8.h"
#include "cutlass/platform/platform.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
// Optionally target F16C extensions to accelerate half-precision conversion.
#if !defined(__CUDA_ARCH__) && (CUTLASS_ENABLE_F16C)
#if defined(_MSC_VER)
#include <immintrin.h>
#if defined(__i386__) || defined(__x86_64__)
#include <intrin.h>
#endif
#define F16C_ROUND_NEAREST 0
#if !defined(__CUDA_ARCH__)
extern __inline float _cvtsh_ss (unsigned short __S) {
__m128i packed;
std::memcpy(&packed, &__S, sizeof(__S));
__m128 result = _mm_cvtph_ps(packed);
float flt;
std::memcpy(&flt, &result, sizeof(flt));
return flt;
}
__inline unsigned short _cvtss_sh (float __F, const int) {
__m128 packed;
std::memcpy(&packed, &__F, sizeof(__F));
__m128i result = _mm_cvtps_ph(packed, F16C_ROUND_NEAREST);
unsigned short u;
std::memcpy(&u, &result, sizeof(u));
return u;
}
#endif
#else
// Linux
#include <x86intrin.h>
#if defined(__i386__) || defined(__x86_64__)
#include <cpuid.h>
#endif
#define F16C_ROUND_NEAREST (_MM_FROUND_TO_NEAREST_INT |_MM_FROUND_NO_EXC)
#endif // _MSC_VER
class CpuId {
bool f16c_enabled;
CpuId() {
#if defined(__i386__) || defined(__x86_64__)
#if defined(_MSC_VER)
int exx[4];
__cpuid (exx, 1);
f16c_enabled = exx[2] & 0x20000000;
#else
// GCC / Clang
int eax, ebx, ecx, edx;
__cpuid (1 , eax, ebx, ecx, edx);
f16c_enabled = ecx & 0x20000000;
#endif
#else
// Arm / PowerPC etc.
f16c_enabled = false;
#endif
}
public:
bool is_f16c_supported() const {
return f16c_enabled;
}
static const CpuId& instance() {
static CpuId cpu;
return cpu;
}
};
#endif // !defined(__CUDA_ARCH__) && CUTLASS_ENABLE_F16C
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// IEEE half-precision floating-point type
struct alignas(2) half_t {
//
// Data members
//
/// Storage type
uint16_t storage;
//
// Static conversion operators
//
/// Constructs from an unsigned short
CUTLASS_HOST_DEVICE
static half_t bitcast(uint16_t x) {
half_t h;
h.storage = x;
return h;
}
/// FP32 -> FP16 conversion - rounds to nearest even
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 530)
// Avoid inlining in device code if no hardware support
__device__ __noinline__
#else
CUTLASS_HOST_DEVICE
#endif
static half_t convert(float const& flt) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__float2half_rn(flt));
#else
#if !defined(__CUDA_ARCH__) && CUTLASS_ENABLE_F16C
if( CpuId::instance().is_f16c_supported() ) {
unsigned short u = _cvtss_sh(flt, F16C_ROUND_NEAREST);
return bitcast(u);
}
#endif
// software implementation rounds toward nearest even
unsigned s;
#if defined(__CUDA_ARCH__)
s = reinterpret_cast<unsigned const &>(flt);
#else
std::memcpy(&s, &flt, sizeof(s));
#endif
uint16_t sign = uint16_t((s >> 16) & 0x8000);
int16_t exp = uint16_t(((s >> 23) & 0xff) - 127);
int mantissa = s & 0x7fffff;
uint16_t u = 0;
if ((s & 0x7fffffff) == 0) {
// sign-preserving zero
return bitcast(sign);
}
if (exp > 15) {
if (exp == 128 && mantissa) {
// not a number
u = 0x7fff;
} else {
// overflow to infinity
u = sign | 0x7c00;
}
return bitcast(u);
}
int sticky_bit = 0;
if (exp >= -14) {
// normal fp32 to normal fp16
exp = uint16_t(exp + uint16_t(15));
u = uint16_t(((exp & 0x1f) << 10));
u = uint16_t(u | (mantissa >> 13));
} else {
// normal single-precision to subnormal half_t-precision representation
int rshift = (-14 - exp);
if (rshift < 32) {
mantissa |= (1 << 23);
sticky_bit = ((mantissa & ((1 << rshift) - 1)) != 0);
mantissa = (mantissa >> rshift);
u = (uint16_t(mantissa >> 13) & 0x3ff);
} else {
mantissa = 0;
u = 0;
}
}
// round to nearest even
int round_bit = ((mantissa >> 12) & 1);
sticky_bit |= ((mantissa & ((1 << 12) - 1)) != 0);
if ((round_bit && sticky_bit) || (round_bit && (u & 1))) {
u = uint16_t(u + 1);
}
u |= sign;
return bitcast(u);
#endif
}
  /// Integer -> FP16 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static half_t convert(int const& n) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__int2half_rn(n));
#else
return convert(float(n));
#endif
}
  /// Unsigned integer -> FP16 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static half_t convert(unsigned const& n) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__uint2half_rn(n));
#else
return convert(float(n));
#endif
}
/// Converts a half-precision value stored as a uint16_t to a float
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 530)
// Avoid inlining in device code if no hardware support
__device__ __noinline__
#else
CUTLASS_HOST_DEVICE
#endif
static float convert(half_t const& x) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __half2float(x.to_half());
#else
#if !defined(__CUDA_ARCH__) && CUTLASS_ENABLE_F16C
if( CpuId::instance().is_f16c_supported() ) {
unsigned short u = x.storage;
return _cvtsh_ss(u);
}
#endif
uint16_t const &h = x.storage;
int sign = ((h >> 15) & 1);
int exp = ((h >> 10) & 0x1f);
int mantissa = (h & 0x3ff);
unsigned f = 0;
if (exp > 0 && exp < 31) {
// normal
exp += 112;
f = (sign << 31) | (exp << 23) | (mantissa << 13);
} else if (exp == 0) {
if (mantissa) {
// subnormal
exp += 113;
while ((mantissa & (1 << 10)) == 0) {
mantissa <<= 1;
exp--;
}
mantissa &= 0x3ff;
f = (sign << 31) | (exp << 23) | (mantissa << 13);
} else {
// sign-preserving zero
f = (sign << 31);
}
} else if (exp == 31) {
if (mantissa) {
f = 0x7fffffff; // not a number
} else {
f = (0xff << 23) | (sign << 31); // inf
}
}
#if defined(__CUDA_ARCH__)
return reinterpret_cast<float const&>(f);
#else
float flt;
std::memcpy(&flt, &f, sizeof(flt));
return flt;
#endif
#endif
}
//
// Methods
//
/// Default constructor
half_t() = default;
/// Reinterpret cast from CUDA's half type
CUTLASS_HOST_DEVICE
explicit half_t(half const & x) {
#if defined(__CUDA_ARCH__)
storage = reinterpret_cast<uint16_t const &>(x);
#else
__half_raw raw(x);
std::memcpy(&storage, &raw.x, sizeof(storage));
#endif
}
/// Floating point conversion
CUTLASS_HOST_DEVICE
explicit half_t(float x) {
storage = convert(x).storage;
}
/// Floating point conversion
CUTLASS_HOST_DEVICE
explicit half_t(double x): half_t(float(x)) {
}
/// float_e4m3_t conversion
CUTLASS_HOST_DEVICE
explicit half_t(float_e4m3_t x): half_t(float(x)) {
}
/// float_e5m2_t conversion
CUTLASS_HOST_DEVICE
explicit half_t(float_e5m2_t x): half_t(float(x)) {
}
/// Integer conversion - round to nearest even
CUTLASS_HOST_DEVICE
explicit half_t(int x) {
storage = convert(x).storage;
}
  /// Integer conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
explicit half_t(unsigned x) {
storage = convert(x).storage;
}
/// Assignment
CUTLASS_HOST_DEVICE
half_t & operator=(half const &x) {
#if defined(__CUDA_ARCH__)
storage = reinterpret_cast<uint16_t const &>(x);
#else
__half_raw raw(x);
std::memcpy(&storage, &raw.x, sizeof(storage));
#endif
return *this;
}
/// Converts to float
CUTLASS_HOST_DEVICE
operator float() const {
return convert(*this);
}
  /// Converts to double
CUTLASS_HOST_DEVICE
explicit operator double() const {
return double(convert(*this));
}
  /// Converts to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
return int(convert(*this));
}
/// Casts to bool
CUTLASS_HOST_DEVICE
explicit operator bool() const {
return (convert(*this) != 0.0f);
}
/// Bitcasts to CUDA's half type
CUTLASS_HOST_DEVICE
half to_half() const {
#if defined(__CUDA_ARCH__)
return reinterpret_cast<half const &>(storage);
#else
__half_raw raw;
std::memcpy(&raw.x, &storage, sizeof(raw.x));
return half(raw);
#endif
}
/// Accesses raw internal state
CUTLASS_HOST_DEVICE
uint16_t& raw() {
return storage;
}
/// Accesses raw internal state
CUTLASS_HOST_DEVICE
uint16_t raw() const {
return storage;
}
/// Returns the sign bit
CUTLASS_HOST_DEVICE
bool signbit() const {
return ((storage & 0x8000) != 0);
}
/// Returns the biased exponent
CUTLASS_HOST_DEVICE
int exponent_biased() const {
return int((storage >> 10) & 0x1f);
}
/// Returns the unbiased exponent
CUTLASS_HOST_DEVICE
int exponent() const {
return exponent_biased() - 15;
}
/// Returns the mantissa
CUTLASS_HOST_DEVICE
int mantissa() const {
return int(storage & 0x3ff);
}
};
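// Usage sketch (illustrative only): half_t converts explicitly to and from float with
// round-to-nearest-even and exposes its raw IEEE FP16 encoding:
//
//   cutlass::half_t h(3.140625f);                            // exactly representable in FP16
//   float f = float(h);                                      // 3.140625f
//   uint16_t bits = h.raw();                                 // 0x4248
//   cutlass::half_t one = cutlass::half_t::bitcast(0x3c00);  // 1.0 in IEEE FP16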
///////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
bool signbit(cutlass::half_t const& h) {
return ((h.raw() & 0x8000) != 0);
}
CUTLASS_HOST_DEVICE
cutlass::half_t abs(cutlass::half_t const& h) {
return cutlass::half_t::bitcast(h.raw() & 0x7fff);
}
CUTLASS_HOST_DEVICE
bool isnan(cutlass::half_t const& h) {
return (h.exponent_biased() == 0x1f) && h.mantissa();
}
CUTLASS_HOST_DEVICE
bool isfinite(cutlass::half_t const& h) {
return (h.exponent_biased() != 0x1f);
}
CUTLASS_HOST_DEVICE
cutlass::half_t nanh(const char*) {
// NVIDIA canonical NaN
return cutlass::half_t::bitcast(0x7fff);
}
CUTLASS_HOST_DEVICE
bool isinf(cutlass::half_t const& h) {
return (h.exponent_biased() == 0x1f) && !h.mantissa();
}
CUTLASS_HOST_DEVICE
bool isnormal(cutlass::half_t const& h) {
return h.exponent_biased() && h.exponent_biased() != 0x1f;
}
CUTLASS_HOST_DEVICE
int fpclassify(cutlass::half_t const& h) {
int exp = h.exponent_biased();
int mantissa = h.mantissa();
if (exp == 0x1f) {
if (mantissa) {
return FP_NAN;
}
else {
return FP_INFINITE;
}
}
else if (!exp) {
if (mantissa) {
return FP_SUBNORMAL;
}
else {
return FP_ZERO;
}
}
return FP_NORMAL;
}
CUTLASS_HOST_DEVICE
cutlass::half_t sqrt(cutlass::half_t const& h) {
#if defined(__CUDACC_RTC__)
return cutlass::half_t(sqrtf(float(h)));
#else
return cutlass::half_t(std::sqrt(float(h)));
#endif
}
CUTLASS_HOST_DEVICE
half_t copysign(half_t const& a, half_t const& b) {
uint16_t a_mag = (a.raw() & 0x7fff);
uint16_t b_sign = (b.raw() & 0x8000);
uint16_t result = (a_mag | b_sign);
return half_t::bitcast(result);
}
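// Usage sketch (illustrative only): the free functions above mirror <cmath> semantics
// for half_t:
//
//   cutlass::half_t x(-2.5f);
//   bool negative = cutlass::signbit(x);                               // true
//   cutlass::half_t magnitude = cutlass::abs(x);                       // 2.5
//   bool is_nan = cutlass::isnan(cutlass::nanh(""));                   // true
//   cutlass::half_t y = cutlass::copysign(cutlass::half_t(1.0f), x);   // -1.0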
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Standard Library operations and definitions
//
///////////////////////////////////////////////////////////////////////////////////////////////////
#if !defined(__CUDACC_RTC__)
namespace std {
/// Numeric limits
template <>
struct numeric_limits<cutlass::half_t> {
static bool const is_specialized = true;
static bool const is_signed = true;
static bool const is_integer = false;
static bool const is_exact = false;
static bool const has_infinity = true;
static bool const has_quiet_NaN = true;
static bool const has_signaling_NaN = false;
static std::float_denorm_style const has_denorm = std::denorm_present;
static bool const has_denorm_loss = true;
static std::float_round_style const round_style = std::round_to_nearest;
static bool const is_iec559 = true;
static bool const is_bounded = true;
static bool const is_modulo = false;
static int const digits = 10;
/// Least positive value
static cutlass::half_t min() { return cutlass::half_t::bitcast(0x0001); }
/// Minimum finite value
static cutlass::half_t lowest() { return cutlass::half_t::bitcast(0xfbff); }
/// Maximum finite value
static cutlass::half_t max() { return cutlass::half_t::bitcast(0x7bff); }
  /// Returns the machine epsilon (difference between 1 and the next representable value)
static cutlass::half_t epsilon() { return cutlass::half_t::bitcast(0x1800); }
/// Returns maximum rounding error
static cutlass::half_t round_error() { return cutlass::half_t(0.5f); }
/// Returns positive infinity value
static cutlass::half_t infinity() { return cutlass::half_t::bitcast(0x7c00); }
/// Returns quiet NaN value
static cutlass::half_t quiet_NaN() { return cutlass::half_t::bitcast(0x7fff); }
/// Returns signaling NaN value
static cutlass::half_t signaling_NaN() { return cutlass::half_t::bitcast(0x7fff); }
/// Returns smallest positive subnormal value
static cutlass::half_t denorm_min() { return cutlass::half_t::bitcast(0x0001); }
};
} // namespace std
#endif
namespace platform {
/// std::numeric_limits
template <class T>
struct numeric_limits;
/// Numeric limits
template <>
struct numeric_limits<cutlass::half_t> {
static bool const is_specialized = true;
static bool const is_signed = true;
static bool const is_integer = false;
static bool const is_exact = false;
static bool const has_infinity = true;
static bool const has_quiet_NaN = true;
static bool const has_signaling_NaN = false;
#if !defined(__CUDACC_RTC__)
static std::float_denorm_style const has_denorm = std::denorm_present;
#endif
static bool const has_denorm_loss = true;
#if !defined(__CUDACC_RTC__)
static std::float_round_style const round_style = std::round_to_nearest;
#endif
static bool const is_iec559 = true;
static bool const is_bounded = true;
static bool const is_modulo = false;
static int const digits = 10;
/// Least positive value
CUTLASS_HOST_DEVICE
static cutlass::half_t min() { return cutlass::half_t::bitcast(0x0001); }
/// Minimum finite value
CUTLASS_HOST_DEVICE
static cutlass::half_t lowest() { return cutlass::half_t::bitcast(0xfbff); }
/// Maximum finite value
CUTLASS_HOST_DEVICE
static cutlass::half_t max() { return cutlass::half_t::bitcast(0x7bff); }
  /// Returns the machine epsilon (difference between 1 and the next representable value)
CUTLASS_HOST_DEVICE
static cutlass::half_t epsilon() { return cutlass::half_t::bitcast(0x1800); }
/// Returns maximum rounding error
CUTLASS_HOST_DEVICE
static cutlass::half_t round_error() { return cutlass::half_t(0.5f); }
/// Returns positive infinity value
CUTLASS_HOST_DEVICE
static cutlass::half_t infinity() { return cutlass::half_t::bitcast(0x7c00); }
/// Returns quiet NaN value
CUTLASS_HOST_DEVICE
static cutlass::half_t quiet_NaN() { return cutlass::half_t::bitcast(0x7fff); }
/// Returns signaling NaN value
CUTLASS_HOST_DEVICE
static cutlass::half_t signaling_NaN() { return cutlass::half_t::bitcast(0x7fff); }
/// Returns smallest positive subnormal value
CUTLASS_HOST_DEVICE
static cutlass::half_t denorm_min() { return cutlass::half_t::bitcast(0x0001); }
};
} // namespace platform
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Arithmetic operators
//
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
bool operator==(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __heq(lhs.to_half(), rhs.to_half());
#else
return float(lhs) == float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
bool operator!=(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __hne(lhs.to_half(), rhs.to_half());
#else
return float(lhs) != float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
bool operator<(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __hlt(lhs.to_half(), rhs.to_half());
#else
return float(lhs) < float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
bool operator<=(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __hle(lhs.to_half(), rhs.to_half());
#else
return float(lhs) <= float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
bool operator>(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __hgt(lhs.to_half(), rhs.to_half());
#else
return float(lhs) > float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
bool operator>=(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return __hge(lhs.to_half(), rhs.to_half());
#else
return float(lhs) >= float(rhs);
#endif
}
CUTLASS_HOST_DEVICE
half_t operator+(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__hadd(lhs.to_half(), rhs.to_half()));
#else
return half_t(float(lhs) + float(rhs));
#endif
}
CUTLASS_HOST_DEVICE
half_t operator-(half_t const& lhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__hneg(lhs.to_half()));
#else
return half_t(-float(lhs));
#endif
}
CUTLASS_HOST_DEVICE
half_t operator-(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__hsub(lhs.to_half(), rhs.to_half()));
#else
return half_t(float(lhs) - float(rhs));
#endif
}
CUTLASS_HOST_DEVICE
half_t operator*(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__hmul(lhs.to_half(), rhs.to_half()));
#else
return half_t(float(lhs) * float(rhs));
#endif
}
CUTLASS_HOST_DEVICE
half_t operator/(half_t const& lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__hdiv(lhs.to_half(), rhs.to_half()));
#else
return half_t(float(lhs) / float(rhs));
#endif
}
CUTLASS_HOST_DEVICE
half_t& operator+=(half_t & lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hadd(lhs.to_half(), rhs.to_half()));
#else
lhs = half_t(float(lhs) + float(rhs));
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t& operator-=(half_t & lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hsub(lhs.to_half(), rhs.to_half()));
#else
lhs = half_t(float(lhs) - float(rhs));
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t& operator*=(half_t & lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hmul(lhs.to_half(), rhs.to_half()));
#else
lhs = half_t(float(lhs) * float(rhs));
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t& operator/=(half_t & lhs, half_t const& rhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hdiv(lhs.to_half(), rhs.to_half()));
#else
lhs = half_t(float(lhs) / float(rhs));
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t& operator++(half_t & lhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hadd(lhs.to_half(), half_t(1.0f).to_half()));
#else
float tmp(lhs);
++tmp;
lhs = half_t(tmp);
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t& operator--(half_t & lhs) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hsub(lhs.to_half(), half_t(1.0f).to_half()));
#else
float tmp(lhs);
--tmp;
lhs = half_t(tmp);
#endif
return lhs;
}
CUTLASS_HOST_DEVICE
half_t operator++(half_t & lhs, int) {
half_t ret(lhs);
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hadd(lhs.to_half(), half_t(1.0f).to_half()));
#else
float tmp(lhs);
tmp++;
lhs = half_t(tmp);
#endif
return ret;
}
CUTLASS_HOST_DEVICE
half_t operator--(half_t & lhs, int) {
half_t ret(lhs);
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
lhs = half_t(__hsub(lhs.to_half(), half_t(1.0f).to_half()));
#else
float tmp(lhs);
tmp--;
lhs = half_t(tmp);
#endif
return ret;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// User-defined literals
//
CUTLASS_HOST_DEVICE
cutlass::half_t operator "" _hf(long double x) {
return cutlass::half_t(float(x));
}
CUTLASS_HOST_DEVICE
cutlass::half_t operator "" _hf(unsigned long long int x) {
return cutlass::half_t(int(x));
}
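// Usage sketch (illustrative only): the user-defined literals give a concise way to write
// half-precision constants in host or device code:
//
//   cutlass::half_t a = 2.25_hf;            // from a floating-point literal
//   cutlass::half_t b = 3_hf;               // from an integer literal
//   cutlass::half_t c = a * b + 0.5_hf;     // 7.25, computed in half precision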
///////////////////////////////////////////////////////////////////////////////////////////////////
| 23,615 | C | 24.669565 | 100 | 0.588016 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/core_io.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Helpers for printing cutlass/core objects
*/
#pragma once
#include <iostream>
#include <typeinfo>
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix.h"
#include "cutlass/quaternion.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/tensor_view.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/conv/convolution.h"
#include "cutlass/conv/conv2d_problem_size.h"
#include "cutlass/conv/conv3d_problem_size.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Output operator for CUDA built-in dim3 type
inline std::ostream &operator<<(std::ostream &out, dim3 d) {
return out << d.x << ", " << d.y << ", " << d.z;
}
/// Output operator for CUDA built-in error type
inline std::ostream &operator<<(std::ostream &out, cudaError_t error) {
#if defined(__NVCC__) || (defined(__clang__) && defined(__CUDA__)) || defined(__CUDACC_RTC__)
return out << cudaGetErrorString(error);
#else
  // Without a CUDA-aware compiler the guarded branch above is skipped, so fall
  // back to printing the numeric error code to keep a return on every path.
  return out << "cudaError_t(" << int(error) << ")";
#endif
}
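// Usage sketch (added illustration, assumes <iostream> and the CUDA runtime
// headers are available):
//
//   dim3 grid(16, 8, 1);
//   std::cout << "grid: " << grid << "\n";    // prints "grid: 16, 8, 1"
//   cudaError_t err = cudaGetLastError();
//   std::cout << "status: " << err << "\n";   // prints the runtime error string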
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
// stream operators for cutlass namespace //
///////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element, int Rank>
inline
std::ostream& operator<<(std::ostream& out, Array<Element, Rank> const& v) {
for (int i = 0; i < Rank; ++i) {
out << (i ? ", " : "") << v[i];
}
return out;
}
template <int Rank>
inline
std::ostream& operator<<(std::ostream& out, Coord<Rank> const& coord) {
for (int i = 0; i < Rank; ++i) {
out << (i ? ", " : "") << coord[i];
}
return out;
}
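// Usage sketch (added illustration; assumes cutlass::make_Coord from
// "cutlass/coord.h"):
//
//   cutlass::Coord<3> extent = cutlass::make_Coord(4, 8, 16);
//   std::cout << extent << "\n";    // prints "4, 8, 16"
//
//   cutlass::Array<float, 4> frag;
//   for (int i = 0; i < 4; ++i) frag[i] = float(i);
//   std::cout << frag << "\n";      // prints "0, 1, 2, 3"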
inline
std::istream & operator>>(std::istream &stream, half_t &x) {
float tmp;
stream >> tmp;
x = static_cast<cutlass::half_t>(tmp);
return stream;
}
inline
std::ostream & operator<<(std::ostream &out, half_t const &x) {
return out << float(x);
}
inline
std::ostream & operator<<(std::ostream &out, bfloat16_t const &x) {
return out << float(x);
}
inline
std::ostream & operator<<(std::ostream &out, tfloat32_t const &x) {
return out << float(x);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper to enable formatted printing of CUTLASS scalar types to an ostream
template <typename T>
struct ScalarIO {
/// Value to print
T value;
/// Default ctor
ScalarIO() { }
/// Constructs from a value
ScalarIO(T value): value(value) {}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Default printing to ostream
template <typename T>
inline std::ostream &operator<<(std::ostream &out, ScalarIO<T> const &scalar) {
return out << scalar.value;
}
/// Printing to ostream of int8_t as integer rather than character
template <>
inline std::ostream &operator<<(std::ostream &out, ScalarIO<int8_t> const &scalar) {
return out << int(scalar.value);
}
/// Printing to ostream of uint8_t as integer rather than character
template <>
inline std::ostream &operator<<(std::ostream &out, ScalarIO<uint8_t> const &scalar) {
return out << unsigned(scalar.value);
}
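// Usage sketch (added illustration): ScalarIO exists mainly so that 8-bit
// integer types print as numbers rather than as raw characters.
//
//   int8_t q = 65;
//   std::cout << q << "\n";                             // prints the character 'A'
//   std::cout << cutlass::ScalarIO<int8_t>(q) << "\n";  // prints 65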
/// Default printing to ostream for MatrixShape
template <int Row, int Column>
inline
std::ostream & operator<<(std::ostream &out, MatrixShape<Row, Column> const &matrix_shape) {
out << "cutlass::MatrixShape::(kRow, kColumn) {"
<< cutlass::MatrixShape<Row,Column>::kRow <<","
<< cutlass::MatrixShape<Row,Column>::kColumn <<"}";
return out;
}
/// Prints matrix to ostream
template <typename Element, int Rows, int Columns>
std::ostream & operator<<(std::ostream &out, Matrix<Element, Rows, Columns> const &rhs) {
for (int i = 0; i < Rows; ++i) {
for (int j = 0; j < Columns; ++j) {
ScalarIO<Element> element(rhs.at(i, j));
out << (j ? ", " : "") << element;
}
    out << "\n";
}
return out;
}
template <typename T>
std::ostream &operator<<(std::ostream &out, Quaternion<T> const &rhs) {
out << ScalarIO<T>(rhs.w()) << " ";
if (rhs.x() >= 0) {
out << "+";
}
out << ScalarIO<T>(rhs.x()) << "*i ";
if (rhs.y() >= 0) {
out << "+";
}
out << ScalarIO<T>(rhs.y()) << "*j ";
if (rhs.z() >= 0) {
out << "+";
}
out << ScalarIO<T>(rhs.z()) << "*k";
return out;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
// stream operators for cutlass::gemm namespace //
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace gemm {
/// Default printing to ostream for GemmShape
template <int M, int N, int K>
inline
std::ostream & operator<<(std::ostream &out, GemmShape<M,N,K> const &gemm_shape) {
out << "cutlass::gemm::GemmShape::(kM, kN, kK) {"
<< cutlass::gemm::GemmShape<M,N,K>::kM <<","
<< cutlass::gemm::GemmShape<M,N,K>::kN <<","
<< cutlass::gemm::GemmShape<M,N,K>::kK << "}";
return out;
}
/// Default printing to ostream for GemmCoord
inline
std::ostream & operator<<(std::ostream &out, GemmCoord const &gemm_coord) {
out << "cutlass::gemm::GemmCoord {"
<< gemm_coord.m() <<","
<< gemm_coord.n() <<","
<< gemm_coord.k() << "}";
return out;
}
} //namespace gemm
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
// stream operators for cutlass namespace //
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Default printing to ostream for PitchLinearShape
template < int Contiguous, int Strided>
inline
std::ostream & operator<<(std::ostream &out, PitchLinearShape<Contiguous, Strided> const &pitch_linear_shape) {
out << "cutlass::PitchLinearShape:(kContiguous, kStrided) {"
<< cutlass::layout::PitchLinearShape<Contiguous,Strided>::kContiguous <<","
<< cutlass::layout::PitchLinearShape<Contiguous,Strided>::kStrided <<"}";
return out;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////
// stream operators for cutlass::conv namespace //
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace conv {
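// Note on the extent notation printed below: N is the batch size, H/W (plus D
// in the 3-D case) are the input spatial extents, C is the number of input
// channels, K the number of filters, R/S (plus T) the filter extents, and
// P/Q (plus Z) the output spatial extents.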
/// Default printing to ostream for Conv2dProblemSize
inline
std::ostream& operator<<(std::ostream& out, Conv2dProblemSize const& problem) {
out << "NHWC: (" << problem.N << ", " << problem.H << ", " << problem.W << ", " << problem.C << ")" << std::endl
<< "KRSC: (" << problem.K << ", " << problem.R << ", " << problem.S << ", " << problem.C / problem.groups << ")" << std::endl
<< "NPQK: (" << problem.N << ", " << problem.P << ", " << problem.Q << ", " << problem.K << ")" << std::endl
<< "groups: (" << problem.groups << ")" << std::endl
<< "Pad_h, Pad_w: (" << problem.pad_h << ", " << problem.pad_w << ")" << std::endl
<< "Stride_h, Stride_w: (" << problem.stride_h << ", " << problem.stride_w << ")" << std::endl
<< "Dilation_h, Dilation_w: (" << problem.dilation_h << ", " << problem.dilation_w << ")" << std::endl
<< "split_k_slices: (" << problem.split_k_slices << ")" << std::endl
<< "mode: (" << ((problem.mode==conv::Mode::kConvolution) ? "conv" : "xcross") << ")";
return out;
}
/// Default printing to ostream for Conv3dProblemSize
inline
std::ostream& operator<<(std::ostream& out, Conv3dProblemSize const& problem) {
out << "NDHWC: (" << problem.N << ", " << problem.D << ", " << problem.H << ", " << problem.W << ", " << problem.C << ")" << std::endl
<< "KTRSC: (" << problem.K << ", " << problem.T << ", " << problem.R << ", " << problem.S << ", " << problem.C << ")" << std::endl
<< "NZPQK: (" << problem.N << ", " << problem.Z << ", " << problem.P << ", " << problem.Q << ", " << problem.K << ")" << std::endl
<< "pad_d, pad_h, pad_w: (" << problem.pad_d << ", " << problem.pad_h << ", " << problem.pad_w << ")" << std::endl
<< "stride_d, stride_h, stride_w: (" << problem.stride_d << ", " << problem.stride_h << ", " << problem.stride_w << ")" << std::endl
<< "dilation_d, dilation_h, dilation_w: (" << problem.dilation_d << ", " << problem.dilation_h << ", " << problem.dilation_w << ")" << std::endl
<< "split_k_slices: (" << problem.split_k_slices << ") " << std::endl
<< "mode: (" << ((problem.mode==conv::Mode::kConvolution) ? "conv" : "xcross") << ")";
return out;
}
} // namespace conv
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
| 11,077 | C | 37.2 | 151 | 0.514309 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/constants.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Boost-style constant definitions for floating-point types.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/complex.h"
///////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace constants {
///////////////////////////////////////////////////////////////////////////////////
//
// Primary templates
//
/// Returns 1, the multiplicative identity element
template <typename T> CUTLASS_HOST_DEVICE T one();
/// Returns 0, the additive identity element
template <typename T> CUTLASS_HOST_DEVICE T zero();
/// Returns 2
template <typename T> CUTLASS_HOST_DEVICE T two();
/// Returns pi, approximately 3.141
template <typename T> CUTLASS_HOST_DEVICE T pi();
/// Returns 2 * pi
template <typename T> CUTLASS_HOST_DEVICE T two_pi();
/// Returns pi / 2
template <typename T> CUTLASS_HOST_DEVICE T half_pi();
/// Returns sqrt(pi)
template <typename T> CUTLASS_HOST_DEVICE T root_pi();
/// Returns sqrt(pi / 2)
template <typename T> CUTLASS_HOST_DEVICE T root_half_pi();
/// Returns sqrt(2 * pi)
template <typename T> CUTLASS_HOST_DEVICE T root_two_pi();
/// Returns sqrt(ln(4))
template <typename T> CUTLASS_HOST_DEVICE T root_ln_four();
/// Returns e, approximately 2.718...
template <typename T> CUTLASS_HOST_DEVICE T e();
/// Returns (1/2)
template <typename T> CUTLASS_HOST_DEVICE T half();
/// Returns sqrt(2), approximately 1.414...
template <typename T> CUTLASS_HOST_DEVICE T root_two();
/// Returns sqrt(2)/2, approximately 0.707...
template <typename T> CUTLASS_HOST_DEVICE T half_root_two();
/// Returns ln(2), approximately 0.693...
template <typename T> CUTLASS_HOST_DEVICE T ln_two();
/// Returns ln(ln(2)), approximately -0.3665...
template <typename T> CUTLASS_HOST_DEVICE T ln_ln_two();
/// Returns 1/3, approximately 0.333...
template <typename T> CUTLASS_HOST_DEVICE T third();
/// Returns 2/3, approximately 0.666...
template <typename T> CUTLASS_HOST_DEVICE T twothirds();
/// Returns pi - 3, approximately 0.1416...
template <typename T> CUTLASS_HOST_DEVICE T pi_minus_three();
/// Returns 4 - pi, approximately 0.858...
template <typename T> CUTLASS_HOST_DEVICE T four_minus_pi();
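// Usage sketch (added illustration): because the constants are function
// templates, numeric code can remain generic over the element type, e.g.
//
//   float a = cutlass::constants::pi<float>();
//   double b = cutlass::constants::root_two<double>();
//   cutlass::half_t c = cutlass::constants::half_pi<cutlass::half_t>();
//   cutlass::complex<float> d = cutlass::constants::one<cutlass::complex<float> >();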
/////////////////////////////////////////////////////////////////////////////////////
// Specialization for double
/// Returns 1, the multiplicative identity element (specialization for double)
template <> CUTLASS_HOST_DEVICE double one<double>() {
uint64_t bits = 0x3ff0000000000000ull;
return reinterpret_cast<double const &>(bits);
}
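// Note: every specialization in this file follows the same pattern: the
// constant is materialized from its exact bit pattern via reinterpret_cast
// rather than from a decimal literal, presumably so the value is bit-exact for
// each target type. A quick host-side check of a pattern might look like this
// (added sketch, assumes <cstring> and <cassert>):
//
//   uint64_t raw = 0x400921fb54442d18ull;   // bit pattern of pi as a double
//   double value;
//   std::memcpy(&value, &raw, sizeof(value));
//   assert(value == cutlass::constants::pi<double>());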
/// Returns 1, the multiplicative identity element (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> one< complex<double> >() {
return complex<double>(one<double>(), double());
}
/// Returns 0, the additive identity element (specialization for double)
template <> CUTLASS_HOST_DEVICE double zero<double>() {
uint64_t bits = 0x0ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns 0, the additive identity element (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> zero< complex<double> >() {
return complex<double>(zero<double>(), double());
}
/// Returns 2 (specialization for double)
template <> CUTLASS_HOST_DEVICE double two<double>() {
uint64_t bits = 0x4000000000000000ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns 2 (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> two< complex<double> >() {
return complex<double>(two<double>(), double());
}
/// Returns pi, approximately 3.141 (specialization for double)
template <> CUTLASS_HOST_DEVICE double pi<double>() {
uint64_t bits = 0x400921fb54442d18ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns pi, approximately 3.141 (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> pi< complex<double> >() {
return complex<double>(pi<double>(), double());
}
/// Returns 2 * pi (specialization for double)
template <> CUTLASS_HOST_DEVICE double two_pi<double>() {
uint64_t bits = 0x401921fb54442d18ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns 2 * pi (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> two_pi< complex<double> >() {
return complex<double>(two_pi<double>(), double());
}
/// Returns pi / 2 (specialization for double)
template <> CUTLASS_HOST_DEVICE double half_pi<double>() {
uint64_t bits = 0x3ff921fb54442d18ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns pi / 2 (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> half_pi< complex<double> >() {
return complex<double>(half_pi<double>(), double());
}
/// Returns sqrt(pi) (specialization for double)
template <> CUTLASS_HOST_DEVICE double root_pi<double>() {
uint64_t bits = 0x3ffc5bf891b4ef6aull;
return reinterpret_cast<double const &>(bits);
}
/// Returns sqrt(pi) (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> root_pi< complex<double> >() {
return complex<double>(root_pi<double>(), double());
}
/// Returns sqrt(pi / 2) (specialization for double)
template <> CUTLASS_HOST_DEVICE double root_half_pi<double>() {
uint64_t bits = 0x3ff40d931ff62705ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns sqrt(pi / 2) (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> root_half_pi< complex<double> >() {
return complex<double>(root_half_pi<double>(), double());
}
/// Returns sqrt(2 * pi) (specialization for double)
template <> CUTLASS_HOST_DEVICE double root_two_pi<double>() {
uint64_t bits = 0x40040d931ff62705ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns sqrt(2 * pi) (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> root_two_pi< complex<double> >() {
return complex<double>(root_two_pi<double>(), double());
}
/// Returns sqrt(ln(4)) (specialization for double)
template <> CUTLASS_HOST_DEVICE double root_ln_four<double>() {
uint64_t bits = 0x3ff2d6abe44afc43ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns sqrt(ln(4)) (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> root_ln_four< complex<double> >() {
return complex<double>(root_ln_four<double>(), double());
}
/// Returns e, approximately 2.718... (specialization for double)
template <> CUTLASS_HOST_DEVICE double e<double>() {
uint64_t bits = 0x4005bf0a8b145769ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns e, approximately 2.718... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> e< complex<double> >() {
return complex<double>(e<double>(), double());
}
/// Returns (1/2) (specialization for double)
template <> CUTLASS_HOST_DEVICE double half<double>() {
uint64_t bits = 0x3fe0000000000000ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns (1/2) (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> half< complex<double> >() {
return complex<double>(half<double>(), double());
}
/// Returns sqrt(2), approximately 1.414... (specialization for double)
template <> CUTLASS_HOST_DEVICE double root_two<double>() {
uint64_t bits = 0x3ff6a09e667f3bcdull;
return reinterpret_cast<double const &>(bits);
}
/// Returns sqrt(2), approximately 1.414... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> root_two< complex<double> >() {
return complex<double>(root_two<double>(), double());
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for double)
template <> CUTLASS_HOST_DEVICE double half_root_two<double>() {
uint64_t bits = 0x3fe6a09e667f3bcdull;
return reinterpret_cast<double const &>(bits);
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> half_root_two< complex<double> >() {
return complex<double>(half_root_two<double>(), double());
}
/// Returns ln(2), approximately 0.693... (specialization for double)
template <> CUTLASS_HOST_DEVICE double ln_two<double>() {
uint64_t bits = 0x3fe62e42fefa39efull;
return reinterpret_cast<double const &>(bits);
}
/// Returns ln(2), approximately 0.693... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> ln_two< complex<double> >() {
return complex<double>(ln_two<double>(), double());
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for double)
template <> CUTLASS_HOST_DEVICE double ln_ln_two<double>() {
uint64_t bits = 0xbfd774f29bdd6b9full;
return reinterpret_cast<double const &>(bits);
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> ln_ln_two< complex<double> >() {
return complex<double>(ln_ln_two<double>(), double());
}
/// Returns 1/3, approximately 0.333... (specialization for double)
template <> CUTLASS_HOST_DEVICE double third<double>() {
uint64_t bits = 0x3fd5555555555555ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns 1/3, approximately 0.333... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> third< complex<double> >() {
return complex<double>(third<double>(), double());
}
/// Returns 2/3, approximately 0.666... (specialization for double)
template <> CUTLASS_HOST_DEVICE double twothirds<double>() {
uint64_t bits = 0x3fe5555555555555ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns 2/3, approximately 0.666... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> twothirds< complex<double> >() {
return complex<double>(twothirds<double>(), double());
}
/// Returns pi - 3, approximately 0.1416... (specialization for double)
template <> CUTLASS_HOST_DEVICE double pi_minus_three<double>() {
uint64_t bits = 0x3fc21fb54442d180ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns pi - 3, approximately 0.1416... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> pi_minus_three< complex<double> >() {
return complex<double>(pi_minus_three<double>(), double());
}
/// Returns 4 - pi, approximately 0.858... (specialization for double)
template <> CUTLASS_HOST_DEVICE double four_minus_pi<double>() {
uint64_t bits = 0x3feb7812aeef4ba0ull;
return reinterpret_cast<double const &>(bits);
}
/// Returns 4 - pi, approximately 0.858... (specialization for complex<double>)
template <> CUTLASS_HOST_DEVICE complex<double> four_minus_pi< complex<double> >() {
return complex<double>(four_minus_pi<double>(), double());
}
/////////////////////////////////////////////////////////////////////////////////////
// Specialization for float
/// Returns 1, the multiplicative identity element (specialization for float)
template <> CUTLASS_HOST_DEVICE float one<float>() {
uint32_t bits = 0x3f800000u;
return reinterpret_cast<float const &>(bits);
}
/// Returns 1, the multiplicative identity element (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> one< complex<float> >() {
return complex<float>(one<float>(), float());
}
/// Returns 0, the additive identity element (specialization for float)
template <> CUTLASS_HOST_DEVICE float zero<float>() {
uint32_t bits = 0x0u;
return reinterpret_cast<float const &>(bits);
}
/// Returns 0, the additive identity element (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> zero< complex<float> >() {
return complex<float>(zero<float>(), float());
}
/// Returns 2 (specialization for float)
template <> CUTLASS_HOST_DEVICE float two<float>() {
uint32_t bits = 0x40000000u;
return reinterpret_cast<float const &>(bits);
}
/// Returns 2 (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> two< complex<float> >() {
return complex<float>(two<float>(), float());
}
/// Returns pi, approximately 3.141 (specialization for float)
template <> CUTLASS_HOST_DEVICE float pi<float>() {
uint32_t bits = 0x40490fdbu;
return reinterpret_cast<float const &>(bits);
}
/// Returns pi, approximately 3.141 (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> pi< complex<float> >() {
return complex<float>(pi<float>(), float());
}
/// Returns 2 * pi (specialization for float)
template <> CUTLASS_HOST_DEVICE float two_pi<float>() {
uint32_t bits = 0x40c90fdbu;
return reinterpret_cast<float const &>(bits);
}
/// Returns 2 * pi (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> two_pi< complex<float> >() {
return complex<float>(two_pi<float>(), float());
}
/// Returns pi / 2 (specialization for float)
template <> CUTLASS_HOST_DEVICE float half_pi<float>() {
uint32_t bits = 0x3fc90fdbu;
return reinterpret_cast<float const &>(bits);
}
/// Returns pi / 2 (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> half_pi< complex<float> >() {
return complex<float>(half_pi<float>(), float());
}
/// Returns sqrt(pi) (specialization for float)
template <> CUTLASS_HOST_DEVICE float root_pi<float>() {
uint32_t bits = 0x3fe2dfc5u;
return reinterpret_cast<float const &>(bits);
}
/// Returns sqrt(pi) (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> root_pi< complex<float> >() {
return complex<float>(root_pi<float>(), float());
}
/// Returns sqrt(pi / 2) (specialization for float)
template <> CUTLASS_HOST_DEVICE float root_half_pi<float>() {
uint32_t bits = 0x3fa06c99u;
return reinterpret_cast<float const &>(bits);
}
/// Returns sqrt(pi / 2) (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> root_half_pi< complex<float> >() {
return complex<float>(root_half_pi<float>(), float());
}
/// Returns sqrt(2 * pi) (specialization for float)
template <> CUTLASS_HOST_DEVICE float root_two_pi<float>() {
uint32_t bits = 0x40206c99u;
return reinterpret_cast<float const &>(bits);
}
/// Returns sqrt(2 * pi) (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> root_two_pi< complex<float> >() {
return complex<float>(root_two_pi<float>(), float());
}
/// Returns sqrt(ln(4)) (specialization for float)
template <> CUTLASS_HOST_DEVICE float root_ln_four<float>() {
uint32_t bits = 0x3f96b55fu;
return reinterpret_cast<float const &>(bits);
}
/// Returns sqrt(ln(4)) (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> root_ln_four< complex<float> >() {
return complex<float>(root_ln_four<float>(), float());
}
/// Returns e, approximately 2.718... (specialization for float)
template <> CUTLASS_HOST_DEVICE float e<float>() {
uint32_t bits = 0x402df854u;
return reinterpret_cast<float const &>(bits);
}
/// Returns e, approximately 2.718... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> e< complex<float> >() {
return complex<float>(e<float>(), float());
}
/// Returns (1/2) (specialization for float)
template <> CUTLASS_HOST_DEVICE float half<float>() {
uint32_t bits = 0x3f000000u;
return reinterpret_cast<float const &>(bits);
}
/// Returns (1/2) (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> half< complex<float> >() {
return complex<float>(half<float>(), float());
}
/// Returns sqrt(2), approximately 1.414... (specialization for float)
template <> CUTLASS_HOST_DEVICE float root_two<float>() {
uint32_t bits = 0x3fb504f3u;
return reinterpret_cast<float const &>(bits);
}
/// Returns sqrt(2), approximately 1.414... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> root_two< complex<float> >() {
return complex<float>(root_two<float>(), float());
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for float)
template <> CUTLASS_HOST_DEVICE float half_root_two<float>() {
uint32_t bits = 0x3f3504f3u;
return reinterpret_cast<float const &>(bits);
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> half_root_two< complex<float> >() {
return complex<float>(half_root_two<float>(), float());
}
/// Returns ln(2), approximately 0.693... (specialization for float)
template <> CUTLASS_HOST_DEVICE float ln_two<float>() {
uint32_t bits = 0x3f317218u;
return reinterpret_cast<float const &>(bits);
}
/// Returns ln(2), approximately 0.693... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> ln_two< complex<float> >() {
return complex<float>(ln_two<float>(), float());
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for float)
template <> CUTLASS_HOST_DEVICE float ln_ln_two<float>() {
uint32_t bits = 0xbebba795u;
return reinterpret_cast<float const &>(bits);
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> ln_ln_two< complex<float> >() {
return complex<float>(ln_ln_two<float>(), float());
}
/// Returns 1/3, approximately 0.333... (specialization for float)
template <> CUTLASS_HOST_DEVICE float third<float>() {
uint32_t bits = 0x3eaaaaabu;
return reinterpret_cast<float const &>(bits);
}
/// Returns 1/3, approximately 0.333... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> third< complex<float> >() {
return complex<float>(third<float>(), float());
}
/// Returns 2/3, approximately 0.666... (specialization for float)
template <> CUTLASS_HOST_DEVICE float twothirds<float>() {
uint32_t bits = 0x3f2aaaabu;
return reinterpret_cast<float const &>(bits);
}
/// Returns 2/3, approximately 0.666... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> twothirds< complex<float> >() {
return complex<float>(twothirds<float>(), float());
}
/// Returns pi - 3, approximately 0.1416... (specialization for float)
template <> CUTLASS_HOST_DEVICE float pi_minus_three<float>() {
uint32_t bits = 0x3e10fdaau;
return reinterpret_cast<float const &>(bits);
}
/// Returns pi - 3, approximately 0.1416... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> pi_minus_three< complex<float> >() {
return complex<float>(pi_minus_three<float>(), float());
}
/// Returns 4 - pi, approximately 0.858... (specialization for float)
template <> CUTLASS_HOST_DEVICE float four_minus_pi<float>() {
uint32_t bits = 0x3f5bc095u;
return reinterpret_cast<float const &>(bits);
}
/// Returns 4 - pi, approximately 0.858... (specialization for complex<float>)
template <> CUTLASS_HOST_DEVICE complex<float> four_minus_pi< complex<float> >() {
return complex<float>(four_minus_pi<float>(), float());
}
/////////////////////////////////////////////////////////////////////////////////////
// Specialization for tfloat32_t
/// Returns 1, the multiplicative identity element (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t one<tfloat32_t>() {
uint32_t bits = 0x3f801000u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns 1, the multiplicative identity element (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> one< complex<tfloat32_t> >() {
return complex<tfloat32_t>(one<tfloat32_t>(), tfloat32_t());
}
/// Returns 0, the additive identity element (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t zero<tfloat32_t>() {
uint32_t bits = 0x1000u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns 0, the additive identity element (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> zero< complex<tfloat32_t> >() {
return complex<tfloat32_t>(zero<tfloat32_t>(), tfloat32_t());
}
/// Returns 2 (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t two<tfloat32_t>() {
uint32_t bits = 0x40001000u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns 2 (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> two< complex<tfloat32_t> >() {
return complex<tfloat32_t>(two<tfloat32_t>(), tfloat32_t());
}
/// Returns pi, approximately 3.141 (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t pi<tfloat32_t>() {
uint32_t bits = 0x40491fdbu;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns pi, approximately 3.141 (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> pi< complex<tfloat32_t> >() {
return complex<tfloat32_t>(pi<tfloat32_t>(), tfloat32_t());
}
/// Returns 2 * pi (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t two_pi<tfloat32_t>() {
uint32_t bits = 0x40c91fdbu;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns 2 * pi (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> two_pi< complex<tfloat32_t> >() {
return complex<tfloat32_t>(two_pi<tfloat32_t>(), tfloat32_t());
}
/// Returns pi / 2 (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t half_pi<tfloat32_t>() {
uint32_t bits = 0x3fc91fdbu;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns pi / 2 (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> half_pi< complex<tfloat32_t> >() {
return complex<tfloat32_t>(half_pi<tfloat32_t>(), tfloat32_t());
}
/// Returns sqrt(pi) (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t root_pi<tfloat32_t>() {
uint32_t bits = 0x3fe2efc5u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns sqrt(pi) (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> root_pi< complex<tfloat32_t> >() {
return complex<tfloat32_t>(root_pi<tfloat32_t>(), tfloat32_t());
}
/// Returns sqrt(pi / 2) (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t root_half_pi<tfloat32_t>() {
uint32_t bits = 0x3fa07c99u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns sqrt(pi / 2) (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> root_half_pi< complex<tfloat32_t> >() {
return complex<tfloat32_t>(root_half_pi<tfloat32_t>(), tfloat32_t());
}
/// Returns sqrt(2 * pi) (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t root_two_pi<tfloat32_t>() {
uint32_t bits = 0x40207c99u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns sqrt(2 * pi) (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> root_two_pi< complex<tfloat32_t> >() {
return complex<tfloat32_t>(root_two_pi<tfloat32_t>(), tfloat32_t());
}
/// Returns sqrt(ln(4)) (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t root_ln_four<tfloat32_t>() {
uint32_t bits = 0x3f96c55fu;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns sqrt(ln(4)) (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> root_ln_four< complex<tfloat32_t> >() {
return complex<tfloat32_t>(root_ln_four<tfloat32_t>(), tfloat32_t());
}
/// Returns e, approximately 2.718... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t e<tfloat32_t>() {
uint32_t bits = 0x402e0854u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns e, approximately 2.718... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> e< complex<tfloat32_t> >() {
return complex<tfloat32_t>(e<tfloat32_t>(), tfloat32_t());
}
/// Returns (1/2) (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t half<tfloat32_t>() {
uint32_t bits = 0x3f001000u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns (1/2) (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> half< complex<tfloat32_t> >() {
return complex<tfloat32_t>(half<tfloat32_t>(), tfloat32_t());
}
/// Returns sqrt(2), approximately 1.414... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t root_two<tfloat32_t>() {
uint32_t bits = 0x3fb514f3u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns sqrt(2), approximately 1.414... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> root_two< complex<tfloat32_t> >() {
return complex<tfloat32_t>(root_two<tfloat32_t>(), tfloat32_t());
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t half_root_two<tfloat32_t>() {
uint32_t bits = 0x3f3514f3u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> half_root_two< complex<tfloat32_t> >() {
return complex<tfloat32_t>(half_root_two<tfloat32_t>(), tfloat32_t());
}
/// Returns ln(2), approximately 0.693... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t ln_two<tfloat32_t>() {
uint32_t bits = 0x3f318218u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns ln(2), approximately 0.693... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> ln_two< complex<tfloat32_t> >() {
return complex<tfloat32_t>(ln_two<tfloat32_t>(), tfloat32_t());
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t ln_ln_two<tfloat32_t>() {
uint32_t bits = 0xbebbb795u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> ln_ln_two< complex<tfloat32_t> >() {
return complex<tfloat32_t>(ln_ln_two<tfloat32_t>(), tfloat32_t());
}
/// Returns 1/3, approximately 0.333... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t third<tfloat32_t>() {
uint32_t bits = 0x3eaabaabu;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns 1/3, approximately 0.333... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> third< complex<tfloat32_t> >() {
return complex<tfloat32_t>(third<tfloat32_t>(), tfloat32_t());
}
/// Returns 2/3, approximately 0.666... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t twothirds<tfloat32_t>() {
uint32_t bits = 0x3f2abaabu;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns 2/3, approximately 0.666... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> twothirds< complex<tfloat32_t> >() {
return complex<tfloat32_t>(twothirds<tfloat32_t>(), tfloat32_t());
}
/// Returns pi - 3, approximately 0.1416... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t pi_minus_three<tfloat32_t>() {
uint32_t bits = 0x3e110daau;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns pi - 3, approximately 0.1416... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> pi_minus_three< complex<tfloat32_t> >() {
return complex<tfloat32_t>(pi_minus_three<tfloat32_t>(), tfloat32_t());
}
/// Returns 4 - pi, approximately 0.858... (specialization for tfloat32_t)
template <> CUTLASS_HOST_DEVICE tfloat32_t four_minus_pi<tfloat32_t>() {
uint32_t bits = 0x3f5bd095u;
return reinterpret_cast<tfloat32_t const &>(bits);
}
/// Returns 4 - pi, approximately 0.858... (specialization for complex<tfloat32_t>)
template <> CUTLASS_HOST_DEVICE complex<tfloat32_t> four_minus_pi< complex<tfloat32_t> >() {
return complex<tfloat32_t>(four_minus_pi<tfloat32_t>(), tfloat32_t());
}
/////////////////////////////////////////////////////////////////////////////////////
// Specialization for half_t
/// Returns 1, the multiplicative identity element (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t one<half_t>() {
uint16_t bits = 0x3c00u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns 1, the multiplicative identity element (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> one< complex<half_t> >() {
return complex<half_t>(one<half_t>(), half_t());
}
/// Returns 0, the additive identity element (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t zero<half_t>() {
uint16_t bits = 0x0u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns 0, the additive identity element (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> zero< complex<half_t> >() {
return complex<half_t>(zero<half_t>(), half_t());
}
/// Returns 2 (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t two<half_t>() {
uint16_t bits = 0x4000u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns 2 (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> two< complex<half_t> >() {
return complex<half_t>(two<half_t>(), half_t());
}
/// Returns pi, approximately 3.141 (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t pi<half_t>() {
uint16_t bits = 0x4248u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns pi, approximately 3.141 (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> pi< complex<half_t> >() {
return complex<half_t>(pi<half_t>(), half_t());
}
/// Returns 2 * pi (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t two_pi<half_t>() {
uint16_t bits = 0x4648u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns 2 * pi (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> two_pi< complex<half_t> >() {
return complex<half_t>(two_pi<half_t>(), half_t());
}
/// Returns pi / 2 (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t half_pi<half_t>() {
uint16_t bits = 0x3e48u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns pi / 2 (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> half_pi< complex<half_t> >() {
return complex<half_t>(half_pi<half_t>(), half_t());
}
/// Returns sqrt(pi) (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t root_pi<half_t>() {
uint16_t bits = 0x3f17u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns sqrt(pi) (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> root_pi< complex<half_t> >() {
return complex<half_t>(root_pi<half_t>(), half_t());
}
/// Returns sqrt(pi / 2) (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t root_half_pi<half_t>() {
uint16_t bits = 0x3d03u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns sqrt(pi / 2) (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> root_half_pi< complex<half_t> >() {
return complex<half_t>(root_half_pi<half_t>(), half_t());
}
/// Returns sqrt(2 * pi) (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t root_two_pi<half_t>() {
uint16_t bits = 0x4103u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns sqrt(2 * pi) (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> root_two_pi< complex<half_t> >() {
return complex<half_t>(root_two_pi<half_t>(), half_t());
}
/// Returns sqrt(ln(4)) (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t root_ln_four<half_t>() {
uint16_t bits = 0x3cb6u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns sqrt(ln(4)) (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> root_ln_four< complex<half_t> >() {
return complex<half_t>(root_ln_four<half_t>(), half_t());
}
/// Returns e, approximately 2.718... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t e<half_t>() {
uint16_t bits = 0x4170u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns e, approximately 2.718... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> e< complex<half_t> >() {
return complex<half_t>(e<half_t>(), half_t());
}
/// Returns (1/2) (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t half<half_t>() {
uint16_t bits = 0x3800u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns (1/2) (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> half< complex<half_t> >() {
return complex<half_t>(half<half_t>(), half_t());
}
/// Returns sqrt(2), approximately 1.414... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t root_two<half_t>() {
uint16_t bits = 0x3da8u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns sqrt(2), approximately 1.414... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> root_two< complex<half_t> >() {
return complex<half_t>(root_two<half_t>(), half_t());
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t half_root_two<half_t>() {
uint16_t bits = 0x39a8u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> half_root_two< complex<half_t> >() {
return complex<half_t>(half_root_two<half_t>(), half_t());
}
/// Returns ln(2), approximately 0.693... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t ln_two<half_t>() {
uint16_t bits = 0x398cu;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns ln(2), approximately 0.693... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> ln_two< complex<half_t> >() {
return complex<half_t>(ln_two<half_t>(), half_t());
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t ln_ln_two<half_t>() {
uint16_t bits = 0xb5ddu;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> ln_ln_two< complex<half_t> >() {
return complex<half_t>(ln_ln_two<half_t>(), half_t());
}
/// Returns 1/3, approximately 0.333... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t third<half_t>() {
uint16_t bits = 0x3555u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns 1/3, approximately 0.333... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> third< complex<half_t> >() {
return complex<half_t>(third<half_t>(), half_t());
}
/// Returns 2/3, approximately 0.666... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t twothirds<half_t>() {
uint16_t bits = 0x3955u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns 2/3, approximately 0.666... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> twothirds< complex<half_t> >() {
return complex<half_t>(twothirds<half_t>(), half_t());
}
/// Returns pi - 3, approximately 0.1416... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t pi_minus_three<half_t>() {
uint16_t bits = 0x3088u;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns pi - 3, approximately 0.1416... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> pi_minus_three< complex<half_t> >() {
return complex<half_t>(pi_minus_three<half_t>(), half_t());
}
/// Returns 4 - pi, approximately 0.858... (specialization for half_t)
template <> CUTLASS_HOST_DEVICE half_t four_minus_pi<half_t>() {
uint16_t bits = 0x3adeu;
return reinterpret_cast<half_t const &>(bits);
}
/// Returns 4 - pi, approximately 0.858... (specialization for complex<half_t>)
template <> CUTLASS_HOST_DEVICE complex<half_t> four_minus_pi< complex<half_t> >() {
return complex<half_t>(four_minus_pi<half_t>(), half_t());
}
/////////////////////////////////////////////////////////////////////////////////////
// Specialization for bfloat16_t
/// Returns 1, the multiplicative identity element (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t one<bfloat16_t>() {
uint16_t bits = 0x3f80u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns 1, the multiplicative identity element (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> one< complex<bfloat16_t> >() {
return complex<bfloat16_t>(one<bfloat16_t>(), bfloat16_t());
}
/// Returns 0, the additive identity element (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t zero<bfloat16_t>() {
uint16_t bits = 0x0u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns 0, the additive identity element (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> zero< complex<bfloat16_t> >() {
return complex<bfloat16_t>(zero<bfloat16_t>(), bfloat16_t());
}
/// Returns 2 (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t two<bfloat16_t>() {
uint16_t bits = 0x4000u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns 2 (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> two< complex<bfloat16_t> >() {
return complex<bfloat16_t>(two<bfloat16_t>(), bfloat16_t());
}
/// Returns pi, approximately 3.141 (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t pi<bfloat16_t>() {
uint16_t bits = 0x4049u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns pi, approximately 3.141 (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> pi< complex<bfloat16_t> >() {
return complex<bfloat16_t>(pi<bfloat16_t>(), bfloat16_t());
}
/// Returns 2 * pi (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t two_pi<bfloat16_t>() {
uint16_t bits = 0x40c9u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns 2 * pi (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> two_pi< complex<bfloat16_t> >() {
return complex<bfloat16_t>(two_pi<bfloat16_t>(), bfloat16_t());
}
/// Returns pi / 2 (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t half_pi<bfloat16_t>() {
uint16_t bits = 0x3fc9u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns pi / 2 (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> half_pi< complex<bfloat16_t> >() {
return complex<bfloat16_t>(half_pi<bfloat16_t>(), bfloat16_t());
}
/// Returns sqrt(pi) (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t root_pi<bfloat16_t>() {
uint16_t bits = 0x3fe3u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns sqrt(pi) (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> root_pi< complex<bfloat16_t> >() {
return complex<bfloat16_t>(root_pi<bfloat16_t>(), bfloat16_t());
}
/// Returns sqrt(pi / 2) (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t root_half_pi<bfloat16_t>() {
uint16_t bits = 0x3fa0u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns sqrt(pi / 2) (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> root_half_pi< complex<bfloat16_t> >() {
return complex<bfloat16_t>(root_half_pi<bfloat16_t>(), bfloat16_t());
}
/// Returns sqrt(2 * pi) (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t root_two_pi<bfloat16_t>() {
uint16_t bits = 0x4020u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns sqrt(2 * pi) (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> root_two_pi< complex<bfloat16_t> >() {
return complex<bfloat16_t>(root_two_pi<bfloat16_t>(), bfloat16_t());
}
/// Returns sqrt(ln(4)) (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t root_ln_four<bfloat16_t>() {
uint16_t bits = 0x3f97u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns sqrt(ln(4)) (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> root_ln_four< complex<bfloat16_t> >() {
return complex<bfloat16_t>(root_ln_four<bfloat16_t>(), bfloat16_t());
}
/// Returns e, approximately 2.718... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t e<bfloat16_t>() {
uint16_t bits = 0x402eu;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns e, approximately 2.718... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> e< complex<bfloat16_t> >() {
return complex<bfloat16_t>(e<bfloat16_t>(), bfloat16_t());
}
/// Returns (1/2) (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t half<bfloat16_t>() {
uint16_t bits = 0x3f00u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns (1/2) (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> half< complex<bfloat16_t> >() {
return complex<bfloat16_t>(half<bfloat16_t>(), bfloat16_t());
}
/// Returns sqrt(2), approximately 1.414... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t root_two<bfloat16_t>() {
uint16_t bits = 0x3fb5u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns sqrt(2), approximately 1.414... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> root_two< complex<bfloat16_t> >() {
return complex<bfloat16_t>(root_two<bfloat16_t>(), bfloat16_t());
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t half_root_two<bfloat16_t>() {
uint16_t bits = 0x3f35u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns sqrt(2)/2, approximately 0.707... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> half_root_two< complex<bfloat16_t> >() {
return complex<bfloat16_t>(half_root_two<bfloat16_t>(), bfloat16_t());
}
/// Returns ln(2), approximately 0.693... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t ln_two<bfloat16_t>() {
uint16_t bits = 0x3f31u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns ln(2), approximately 0.693... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> ln_two< complex<bfloat16_t> >() {
return complex<bfloat16_t>(ln_two<bfloat16_t>(), bfloat16_t());
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t ln_ln_two<bfloat16_t>() {
uint16_t bits = 0xbebcu;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns ln(ln(2)), approximately -0.3665... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> ln_ln_two< complex<bfloat16_t> >() {
return complex<bfloat16_t>(ln_ln_two<bfloat16_t>(), bfloat16_t());
}
/// Returns 1/3, approximately 0.333... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t third<bfloat16_t>() {
uint16_t bits = 0x3eabu;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns 1/3, approximately 0.333... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> third< complex<bfloat16_t> >() {
return complex<bfloat16_t>(third<bfloat16_t>(), bfloat16_t());
}
/// Returns 2/3, approximately 0.666... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t twothirds<bfloat16_t>() {
uint16_t bits = 0x3f2bu;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns 2/3, approximately 0.666... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> twothirds< complex<bfloat16_t> >() {
return complex<bfloat16_t>(twothirds<bfloat16_t>(), bfloat16_t());
}
/// Returns pi - 3, approximately 0.1416... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t pi_minus_three<bfloat16_t>() {
uint16_t bits = 0x3e11u;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns pi - 3, approximately 0.1416... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> pi_minus_three< complex<bfloat16_t> >() {
return complex<bfloat16_t>(pi_minus_three<bfloat16_t>(), bfloat16_t());
}
/// Returns 4 - pi, approximately 0.858... (specialization for bfloat16_t)
template <> CUTLASS_HOST_DEVICE bfloat16_t four_minus_pi<bfloat16_t>() {
uint16_t bits = 0x3f5cu;
return reinterpret_cast<bfloat16_t const &>(bits);
}
/// Returns 4 - pi, approximately 0.858... (specialization for complex<bfloat16_t>)
template <> CUTLASS_HOST_DEVICE complex<bfloat16_t> four_minus_pi< complex<bfloat16_t> >() {
return complex<bfloat16_t>(four_minus_pi<bfloat16_t>(), bfloat16_t());
}
///////////////////////////////////////////////////////////////////////////////////
} // namespace constants
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////
| 47,943 | C | 37.664516 | 100 | 0.689027 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/array.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Statically sized array of elements that accommodates all CUTLASS-supported numeric types
and is safe to use in a union.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_types.h"
#include "cutlass/half.h"
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Statically sized array for any data type
template <
typename T,
int N,
bool RegisterSized = sizeof_bits<T>::value >= 32
>
class Array;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines the size of an Array<> in bits
template <typename T, int N, bool RegisterSized>
struct sizeof_bits<Array<T, N, RegisterSized> > {
static int const value =
int(sizeof(typename Array<T, N, RegisterSized>::Storage)) * 8 * int(Array<T, N, RegisterSized>::kStorageElements);
};
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns true if the argument is a power of 2
CUTLASS_HOST_DEVICE
constexpr bool ispow2(unsigned x) {
return x && (!(x & (x - 1)));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns the largest power of two not greater than the argument.
CUTLASS_HOST_DEVICE
constexpr unsigned floor_pow_2(unsigned x) {
return (x == 0 || ispow2(x)) ? x : ((floor_pow_2(x >> 1)) << 1);
}
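// Example (illustrative): ispow2(8u) is true and ispow2(12u) is false, while
// floor_pow_2(12u) == 8u. Both helpers are constexpr, so such properties can be
// checked at compile time, e.g. static_assert(floor_pow_2(12u) == 8u, "");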
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Statically sized array for any data type
template <
typename T,
int N
>
class Array<T, N, true> {
public:
/// Storage type
using Storage = T;
/// Element type
using Element = T;
/// Number of storage elements
//static std::size_t const kStorageElements = N;
static size_t const kStorageElements = N;
/// Number of logical elements
static size_t const kElements = N;
//
// C++ standard members
//
typedef T value_type;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef value_type &reference;
typedef value_type const & const_reference;
typedef value_type *pointer;
typedef value_type const * const_pointer;
//
// Iterators
//
/// Bidirectional iterator over elements
class iterator {
/// Pointer to object
T *ptr_;
public:
CUTLASS_HOST_DEVICE
iterator(): ptr_(nullptr) { }
CUTLASS_HOST_DEVICE
iterator(T *_ptr): ptr_(_ptr) { }
CUTLASS_HOST_DEVICE
iterator &operator++() {
++ptr_;
return *this;
}
CUTLASS_HOST_DEVICE
iterator &operator--() {
--ptr_;
return *this;
}
CUTLASS_HOST_DEVICE
iterator operator++(int) {
iterator ret(*this);
++ptr_;
return ret;
}
CUTLASS_HOST_DEVICE
iterator operator--(int) {
iterator ret(*this);
--ptr_;
return ret;
}
CUTLASS_HOST_DEVICE
T &operator*() const {
return *ptr_;
}
CUTLASS_HOST_DEVICE
bool operator==(iterator const &other) const {
return ptr_ == other.ptr_;
}
CUTLASS_HOST_DEVICE
bool operator!=(iterator const &other) const {
return ptr_ != other.ptr_;
}
};
/// Bidirectional constant iterator over elements
class const_iterator {
/// Pointer to object
const T *ptr_;
public:
CUTLASS_HOST_DEVICE
const_iterator(): ptr_(nullptr) { }
CUTLASS_HOST_DEVICE
const_iterator(T const *_ptr): ptr_(_ptr) { }
CUTLASS_HOST_DEVICE
const_iterator &operator++() {
++ptr_;
return *this;
}
CUTLASS_HOST_DEVICE
const_iterator &operator--() {
--ptr_;
return *this;
}
CUTLASS_HOST_DEVICE
const_iterator operator++(int) {
const_iterator ret(*this);
++ptr_;
return ret;
}
CUTLASS_HOST_DEVICE
const_iterator operator--(int) {
const_iterator ret(*this);
--ptr_;
return ret;
}
CUTLASS_HOST_DEVICE
T const &operator*() const {
return *ptr_;
}
CUTLASS_HOST_DEVICE
bool operator==(const_iterator const &other) const {
return ptr_ == other.ptr_;
}
CUTLASS_HOST_DEVICE
bool operator!=(const_iterator const &other) const {
return ptr_ != other.ptr_;
}
};
/// Bidirectional iterator over elements
class reverse_iterator {
/// Pointer to object
T *ptr_;
public:
CUTLASS_HOST_DEVICE
reverse_iterator(): ptr_(nullptr) { }
CUTLASS_HOST_DEVICE
reverse_iterator(T *_ptr): ptr_(_ptr) { }
CUTLASS_HOST_DEVICE
reverse_iterator &operator++() {
--ptr_;
return *this;
}
CUTLASS_HOST_DEVICE
reverse_iterator &operator--() {
++ptr_;
return *this;
}
CUTLASS_HOST_DEVICE
    reverse_iterator operator++(int) {
      reverse_iterator ret(*this);
      --ptr_;
      return ret;
    }
    CUTLASS_HOST_DEVICE
    reverse_iterator operator--(int) {
      reverse_iterator ret(*this);
      ++ptr_;
      return ret;
    }
CUTLASS_HOST_DEVICE
T &operator*() const {
return *(ptr_ - 1);
}
CUTLASS_HOST_DEVICE
bool operator==(reverse_iterator const &other) const {
return ptr_ == other.ptr_;
}
CUTLASS_HOST_DEVICE
bool operator!=(reverse_iterator const &other) const {
return ptr_ != other.ptr_;
}
};
/// Bidirectional constant iterator over elements
class const_reverse_iterator {
/// Pointer to object
T const *ptr_;
public:
CUTLASS_HOST_DEVICE
const_reverse_iterator(): ptr_(nullptr) { }
CUTLASS_HOST_DEVICE
const_reverse_iterator(T const *_ptr): ptr_(_ptr) { }
CUTLASS_HOST_DEVICE
const_reverse_iterator &operator++() {
--ptr_;
return *this;
}
CUTLASS_HOST_DEVICE
const_reverse_iterator &operator--() {
++ptr_;
return *this;
}
CUTLASS_HOST_DEVICE
const_reverse_iterator operator++(int) {
const_reverse_iterator ret(*this);
--ptr_;
return ret;
}
CUTLASS_HOST_DEVICE
const_reverse_iterator operator--(int) {
const_reverse_iterator ret(*this);
++ptr_;
return ret;
}
CUTLASS_HOST_DEVICE
T const &operator*() const {
return *(ptr_ - 1);
}
CUTLASS_HOST_DEVICE
    bool operator==(const_reverse_iterator const &other) const {
      return ptr_ == other.ptr_;
    }
    CUTLASS_HOST_DEVICE
    bool operator!=(const_reverse_iterator const &other) const {
      return ptr_ != other.ptr_;
    }
};
private:
/// Internal storage
Storage storage[kElements];
public:
#if 0
CUTLASS_HOST_DEVICE
Array() { }
CUTLASS_HOST_DEVICE
Array(Array const &x) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kElements; ++i) {
storage[i] = x.storage[i];
}
}
#endif
/// Efficient clear method
CUTLASS_HOST_DEVICE
void clear() {
fill(T(0));
}
CUTLASS_HOST_DEVICE
reference at(size_type pos) {
return reinterpret_cast<reference>(storage[pos]);
}
CUTLASS_HOST_DEVICE
const_reference at(size_type pos) const {
return reinterpret_cast<const_reference>(storage[pos]);
}
CUTLASS_HOST_DEVICE
reference operator[](size_type pos) {
return reinterpret_cast<reference>(storage[pos]);
}
CUTLASS_HOST_DEVICE
const_reference operator[](size_type pos) const {
return reinterpret_cast<const_reference>(storage[pos]);
}
CUTLASS_HOST_DEVICE
reference front() {
return reinterpret_cast<reference>(storage[0]);
}
CUTLASS_HOST_DEVICE
const_reference front() const {
return reinterpret_cast<const_reference>(storage[0]);
}
CUTLASS_HOST_DEVICE
reference back() {
return reinterpret_cast<reference>(storage[kStorageElements - 1]);
}
CUTLASS_HOST_DEVICE
const_reference back() const {
return reinterpret_cast<const_reference>(storage[kStorageElements - 1]);
}
CUTLASS_HOST_DEVICE
pointer data() {
return reinterpret_cast<pointer>(storage);
}
CUTLASS_HOST_DEVICE
const_pointer data() const {
return reinterpret_cast<const_pointer>(storage);
}
CUTLASS_HOST_DEVICE
pointer raw_data() {
return reinterpret_cast<pointer>(storage);
}
CUTLASS_HOST_DEVICE
const_pointer raw_data() const {
return reinterpret_cast<const_pointer>(storage);
}
CUTLASS_HOST_DEVICE
constexpr bool empty() const {
return !kElements;
}
CUTLASS_HOST_DEVICE
constexpr size_type size() const {
return kElements;
}
CUTLASS_HOST_DEVICE
constexpr size_type max_size() const {
return kElements;
}
CUTLASS_HOST_DEVICE
void fill(T const &value) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kElements; ++i) {
storage[i] = static_cast<Storage>(value);
}
}
CUTLASS_HOST_DEVICE
iterator begin() {
return iterator(storage);
}
CUTLASS_HOST_DEVICE
const_iterator cbegin() const {
return const_iterator(storage);
}
CUTLASS_HOST_DEVICE
iterator end() {
return iterator(reinterpret_cast<pointer>(storage + kStorageElements));
}
CUTLASS_HOST_DEVICE
const_iterator cend() const {
return const_iterator(reinterpret_cast<const_pointer>(storage + kStorageElements));
}
CUTLASS_HOST_DEVICE
reverse_iterator rbegin() {
return reverse_iterator(reinterpret_cast<pointer>(storage + kStorageElements));
}
CUTLASS_HOST_DEVICE
const_reverse_iterator crbegin() const {
return const_reverse_iterator(reinterpret_cast<const_pointer>(storage + kStorageElements));
}
CUTLASS_HOST_DEVICE
reverse_iterator rend() {
return reverse_iterator(reinterpret_cast<pointer>(storage));
}
CUTLASS_HOST_DEVICE
const_reverse_iterator crend() const {
return const_reverse_iterator(reinterpret_cast<const_pointer>(storage));
}
//
// Comparison operators
//
};
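// Example usage (illustrative sketch): Array mirrors a subset of the std::array
// interface and is usable in both host and device code, e.g.
//
//   Array<int, 4> a;
//   a.fill(7);                                  // every element becomes 7
//   for (auto it = a.begin(); it != a.end(); ++it) { /* ... */ }
//   a.clear();                                  // equivalent to a.fill(int(0))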
////////////////////////////////////////////////////////////////////////////////////////////////////
// Factories
////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element>
CUTLASS_HOST_DEVICE
Array<Element, 1> make_Array(Element x) {
Array<Element, 1> m;
m[0] = x;
return m;
}
template <typename Element>
CUTLASS_HOST_DEVICE
Array<Element, 2> make_Array(Element x, Element y) {
Array<Element, 2> m;
m[0] = x;
m[1] = y;
return m;
}
template <typename Element>
CUTLASS_HOST_DEVICE
Array<Element, 3> make_Array(Element x, Element y, Element z) {
Array<Element, 3> m;
m[0] = x;
m[1] = y;
m[2] = z;
return m;
}
template <typename Element>
CUTLASS_HOST_DEVICE
Array<Element, 4> make_Array(Element x, Element y, Element z, Element w) {
Array<Element, 4> m;
m[0] = x;
m[1] = y;
m[2] = z;
m[3] = w;
return m;
}
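// Example usage (illustrative sketch) of the factories above:
//
//   Array<float, 4> v = make_Array(1.0f, 2.0f, 3.0f, 4.0f);
//   float x = v[0];         // element access via operator[]
//   v.back() = 0.0f;        // mutable access to the last element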
/////////////////////////////////////////////////////////////////////////////////////////////////
// functional.h numeric specializations
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T, int N>
struct absolute_value_op< Array<T, N> > {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs) const {
Array<T, N> result;
absolute_value_op<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i]);
}
return result;
}
};
template <typename T, int N>
struct plus<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, Array<T, N> const &rhs) const {
Array<T, N> result;
plus<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], rhs[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, T const &scalar) const {
Array<T, N> result;
plus<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], scalar);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()( T const &scalar, Array<T, N> const &rhs) const {
Array<T, N> result;
plus<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(scalar, rhs[i]);
}
return result;
}
};
template <typename T, int N>
struct minus<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, Array<T, N> const &rhs) const {
Array<T, N> result;
minus<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], rhs[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, T const &scalar) const {
Array<T, N> result;
minus<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], scalar);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()( T const &scalar, Array<T, N> const &rhs) const {
Array<T, N> result;
minus<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(scalar, rhs[i]);
}
return result;
}
};
template <typename T, int N>
struct multiplies<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, Array<T, N> const &rhs) const {
Array<T, N> result;
multiplies<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], rhs[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, T const &scalar) const {
Array<T, N> result;
multiplies<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], scalar);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()( T const &scalar, Array<T, N> const &rhs) const {
Array<T, N> result;
multiplies<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(scalar, rhs[i]);
}
return result;
}
};
template <typename T, int N>
struct divides<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, Array<T, N> const &rhs) const {
Array<T, N> result;
divides<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], rhs[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, T const &scalar) const {
Array<T, N> result;
divides<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], scalar);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()( T const &scalar, Array<T, N> const &rhs) const {
Array<T, N> result;
divides<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(scalar, rhs[i]);
}
return result;
}
};
template <typename T, int N>
struct maximum<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, Array<T, N> const &rhs) const {
Array<T, N> result;
maximum<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], rhs[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, T const &scalar) const {
Array<T, N> result;
maximum<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], scalar);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()( T const &scalar, Array<T, N> const &rhs) const {
Array<T, N> result;
maximum<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(scalar, rhs[i]);
}
return result;
}
};
template <typename T, int N>
struct minimum<Array<T, N>> {
CUTLASS_HOST_DEVICE
static T scalar_op(T const &lhs, T const &rhs) {
return (rhs < lhs ? rhs : lhs);
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, Array<T, N> const &rhs) const {
Array<T, N> result;
minimum<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], rhs[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs, T const &scalar) const {
Array<T, N> result;
minimum<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i], scalar);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()( T const &scalar, Array<T, N> const &rhs) const {
Array<T, N> result;
minimum<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(scalar, rhs[i]);
}
return result;
}
};
template <typename T, int N>
struct negate<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &lhs) const {
Array<T, N> result;
negate<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(lhs[i]);
}
return result;
}
};
/// Fused multiply-add
template <typename T, int N>
struct multiply_add<Array<T, N>, Array<T, N>, Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &a, Array<T, N> const &b, Array<T, N> const &c) const {
Array<T, N> result;
multiply_add<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(a[i], b[i], c[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &a, T const &scalar, Array<T, N> const &c) const {
Array<T, N> result;
multiply_add<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(a[i], scalar, c[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(T const &scalar, Array<T, N> const &b, Array<T, N> const &c) const {
Array<T, N> result;
multiply_add<T> scalar_op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = scalar_op(scalar, b[i], c[i]);
}
return result;
}
};
/// Fused multiply-add-relu0
template <typename T, int N>
struct multiply_add_relu0<Array<T, N>, Array<T, N>, Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &a, Array<T, N> const &b, Array<T, N> const &c) const {
Array<T, N> result;
multiply_add<T> scalar_op;
maximum<T> mx;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = mx(scalar_op(a[i], b[i], c[i]), T(0));
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &a, T const &scalar, Array<T, N> const &c) const {
Array<T, N> result;
multiply_add<T> scalar_op;
maximum<T> mx;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = mx(scalar_op(a[i], scalar, c[i]), T(0));
}
return result;
}
CUTLASS_HOST_DEVICE
Array<T, N> operator()(T const &scalar, Array<T, N> const &b, Array<T, N> const &c) const {
Array<T, N> result;
multiply_add<T> scalar_op;
maximum<T> mx;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = mx(scalar_op(scalar, b[i], c[i]), T(0));
}
return result;
}
};
template <typename T, int N>
struct conjugate<Array<T, N> > {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &a) const {
conjugate<T> conj_op;
Array<T, N> ca;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
ca[i] = conj_op(a[i]);
}
return ca;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// functional.h numeric specializations targeting SIMD instructions in device code.
/////////////////////////////////////////////////////////////////////////////////////////////////
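// The specializations below share a common structure: when the packed half-precision
// intrinsics are available (__CUDA_ARCH__ >= 530 for add/sub/mul/div and fused
// multiply-add, >= 800 for min/max and the relu variants), elements are processed two
// at a time through __half2; if N is odd, the trailing element is handled with the
// corresponding scalar __half intrinsic. On other targets, and in host code, a plain
// elementwise loop is used instead.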
template <int N>
struct plus<Array<half_t, N>> {
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hadd2(lhs_ptr[i], rhs_ptr[i]);
}
if (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hadd(a_residual_ptr[N - 1], b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs[i] + rhs[i];
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(half_t const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 lhs_pair = __half2half2(reinterpret_cast<__half const &>(lhs));
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hadd2(lhs_pair, rhs_ptr[i]);
}
if (N % 2) {
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hadd(reinterpret_cast<__half const &>(lhs), b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs + rhs[i];
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, half_t const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 rhs_pair = __half2half2(reinterpret_cast<__half const &>(rhs));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hadd2(lhs_ptr[i], rhs_pair);
}
if (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half d_residual = __hadd(a_residual_ptr[N - 1], reinterpret_cast<__half const &>(rhs));
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs[i] + rhs;
}
#endif
return result;
}
};
template <int N>
struct minus<Array<half_t, N>> {
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hsub2(lhs_ptr[i], rhs_ptr[i]);
}
if (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hsub(a_residual_ptr[N - 1], b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs[i] - rhs[i];
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(half_t const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 lhs_pair = __half2half2(reinterpret_cast<__half const &>(lhs));
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hsub2(lhs_pair, rhs_ptr[i]);
}
if (N % 2) {
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hsub(reinterpret_cast<__half const &>(lhs), b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs - rhs[i];
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, half_t const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 rhs_pair = __half2half2(reinterpret_cast<__half const &>(rhs));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hsub2(lhs_ptr[i], rhs_pair);
}
if (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half d_residual = __hsub(a_residual_ptr[N - 1], reinterpret_cast<__half const &>(rhs));
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs[i] - rhs;
}
#endif
return result;
}
};
template <int N>
struct multiplies<Array<half_t, N>> {
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hmul2(lhs_ptr[i], rhs_ptr[i]);
}
if (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hmul(a_residual_ptr[N - 1], b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs[i] * rhs[i];
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(half_t const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 lhs_pair = __half2half2(reinterpret_cast<__half const &>(lhs));
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hmul2(lhs_pair, rhs_ptr[i]);
}
if (N % 2) {
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hmul(
reinterpret_cast<__half const &>(lhs),
b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs * rhs[i];
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, half_t const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 rhs_pair = __half2half2(reinterpret_cast<__half const &>(rhs));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hmul2(lhs_ptr[i], rhs_pair);
}
if (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half d_residual = __hmul(
a_residual_ptr[N - 1],
reinterpret_cast<__half const &>(rhs));
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs[i] * rhs;
}
#endif
return result;
}
};
template <int N>
struct divides<Array<half_t, N>> {
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __h2div(lhs_ptr[i], rhs_ptr[i]);
}
if (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hdiv(
a_residual_ptr[N - 1],
b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs[i] / rhs[i];
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(half_t const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 lhs_pair = __half2half2(reinterpret_cast<__half const &>(lhs));
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __h2div(lhs_pair, rhs_ptr[i]);
}
if (N % 2) {
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hdiv(
reinterpret_cast<__half const &>(lhs),
b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs / rhs[i];
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, half_t const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 rhs_pair = __half2half2(reinterpret_cast<__half const &>(rhs));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __h2div(lhs_ptr[i], rhs_pair);
}
if (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half d_residual = __hdiv(
a_residual_ptr[N - 1],
reinterpret_cast<__half const &>(rhs));
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = lhs[i] / rhs;
}
#endif
return result;
}
};
template <int N>
struct negate<Array<half_t, N>> {
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *source_ptr = reinterpret_cast<__half2 const *>(&lhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hneg2(source_ptr[i]);
}
if (N % 2) {
half_t x = lhs[N - 1];
__half lhs_val = -reinterpret_cast<__half const &>(x);
result[N - 1] = reinterpret_cast<half_t const &>(lhs_val);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = -lhs[i];
}
#endif
return result;
}
};
/// Fused multiply-add
template <int N>
struct multiply_add<Array<half_t, N>, Array<half_t, N>, Array<half_t, N>> {
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(
Array<half_t, N> const &a,
Array<half_t, N> const &b,
Array<half_t, N> const &c) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *a_ptr = reinterpret_cast<__half2 const *>(&a);
__half2 const *b_ptr = reinterpret_cast<__half2 const *>(&b);
__half2 const *c_ptr = reinterpret_cast<__half2 const *>(&c);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hfma2(a_ptr[i], b_ptr[i], c_ptr[i]);
}
if (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&a);
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&b);
__half const *c_residual_ptr = reinterpret_cast<__half const *>(&c);
__half d_residual = __hfma(
a_residual_ptr[N - 1],
b_residual_ptr[N - 1],
c_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
multiply_add<half_t> op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = op(a[i], b[i], c[i]);
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(
half_t const &a,
Array<half_t, N> const &b,
Array<half_t, N> const &c) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 a_pair = __half2half2(reinterpret_cast<__half const &>(a));
__half2 const *b_ptr = reinterpret_cast<__half2 const *>(&b);
__half2 const *c_ptr = reinterpret_cast<__half2 const *>(&c);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hfma2(a_pair, b_ptr[i], c_ptr[i]);
}
if (N % 2) {
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&b);
__half const *c_residual_ptr = reinterpret_cast<__half const *>(&c);
__half d_residual = __hfma(
reinterpret_cast<__half const &>(a),
b_residual_ptr[N - 1],
c_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
multiply_add<half_t> op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = op(a, b[i], c[i]);
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(
Array<half_t, N> const &a,
half_t const &b,
Array<half_t, N> const &c) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *a_ptr = reinterpret_cast<__half2 const *>(&a);
__half2 b_pair = __half2half2(reinterpret_cast<__half const &>(b));
__half2 const *c_ptr = reinterpret_cast<__half2 const *>(&c);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hfma2(a_ptr[i], b_pair, c_ptr[i]);
}
if (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&a);
__half const *c_residual_ptr = reinterpret_cast<__half const *>(&c);
__half d_residual = __hfma(
a_residual_ptr[N - 1],
reinterpret_cast<__half const &>(b),
c_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
multiply_add<half_t> op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = op(a[i], b, c[i]);
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(
Array<half_t, N> const &a,
Array<half_t, N> const &b,
half_t const &c) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *a_ptr = reinterpret_cast<__half2 const *>(&a);
__half2 const *b_ptr = reinterpret_cast<__half2 const *>(&b);
__half2 c_pair = __half2half2(reinterpret_cast<__half const &>(c));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hfma2(a_ptr[i], b_ptr[i], c_pair);
}
if (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&a);
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&b);
__half d_residual = __hfma(
a_residual_ptr[N - 1],
b_residual_ptr[N - 1],
reinterpret_cast<__half const &>(c));
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
multiply_add<half_t> op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = op(a[i], b[i], c);
}
#endif
return result;
}
};
/// Fused multiply-add-relu0
template <int N>
struct multiply_add_relu0<Array<half_t, N>, Array<half_t, N>, Array<half_t, N>> {
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(
Array<half_t, N> const &a,
Array<half_t, N> const &b,
Array<half_t, N> const &c) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *a_ptr = reinterpret_cast<__half2 const *>(&a);
__half2 const *b_ptr = reinterpret_cast<__half2 const *>(&b);
__half2 const *c_ptr = reinterpret_cast<__half2 const *>(&c);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hfma2_relu(a_ptr[i], b_ptr[i], c_ptr[i]);
}
if (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&a);
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&b);
__half const *c_residual_ptr = reinterpret_cast<__half const *>(&c);
__half d_residual = __hfma_relu(
a_residual_ptr[N - 1],
b_residual_ptr[N - 1],
c_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
multiply_add<half_t> op;
maximum<half_t> mx;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = mx(op(a[i], b[i], c[i]), (half_t)0);
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(
half_t const &a,
Array<half_t, N> const &b,
Array<half_t, N> const &c) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 a_pair = __half2half2(reinterpret_cast<__half const &>(a));
__half2 const *b_ptr = reinterpret_cast<__half2 const *>(&b);
__half2 const *c_ptr = reinterpret_cast<__half2 const *>(&c);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hfma2_relu(a_pair, b_ptr[i], c_ptr[i]);
}
if (N % 2) {
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&b);
__half const *c_residual_ptr = reinterpret_cast<__half const *>(&c);
__half d_residual = __hfma_relu(
reinterpret_cast<__half const &>(a),
b_residual_ptr[N - 1],
c_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
multiply_add<half_t> op;
maximum<half_t> mx;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = mx(op(a, b[i], c[i]), half_t(0));
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(
Array<half_t, N> const &a,
half_t const &b,
Array<half_t, N> const &c) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *a_ptr = reinterpret_cast<__half2 const *>(&a);
__half2 b_pair = __half2half2(reinterpret_cast<__half const &>(b));
__half2 const *c_ptr = reinterpret_cast<__half2 const *>(&c);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hfma2_relu(a_ptr[i], b_pair, c_ptr[i]);
}
if (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&a);
__half const *c_residual_ptr = reinterpret_cast<__half const *>(&c);
__half d_residual = __hfma_relu(
a_residual_ptr[N - 1],
reinterpret_cast<__half const &>(b),
c_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
multiply_add<half_t> op;
maximum<half_t> mx;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = mx(op(a[i], b, c[i]), half_t(0));
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(
Array<half_t, N> const &a,
Array<half_t, N> const &b,
half_t const &c) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *a_ptr = reinterpret_cast<__half2 const *>(&a);
__half2 const *b_ptr = reinterpret_cast<__half2 const *>(&b);
__half2 c_pair = __half2half2(reinterpret_cast<__half const &>(c));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hfma2_relu(a_ptr[i], b_ptr[i], c_pair);
}
if (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&a);
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&b);
__half d_residual = __hfma_relu(
a_residual_ptr[N - 1],
b_residual_ptr[N - 1],
reinterpret_cast<__half const &>(c));
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
multiply_add<half_t> op;
maximum<half_t> mx;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = mx(op(a[i], b[i], c), half_t(0));
}
#endif
return result;
}
};
template <int N>
struct minimum<Array<half_t, N>> {
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hmin2(lhs_ptr[i], rhs_ptr[i]);
}
if (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hmin(
a_residual_ptr[N - 1],
b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = (rhs[i] < lhs[i] ? rhs[i] : lhs[i]);
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(half_t const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 lhs_pair = __half2half2(reinterpret_cast<__half const &>(lhs));
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hmin2(lhs_pair, rhs_ptr[i]);
}
if (N % 2) {
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hmin(
reinterpret_cast<__half const &>(lhs),
b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = (rhs[i] < lhs ? rhs[i] : lhs);
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, half_t const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 rhs_pair = __half2half2(reinterpret_cast<__half const &>(rhs));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hmin2(lhs_ptr[i], rhs_pair);
}
if (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half d_residual = __hmin(
a_residual_ptr[N - 1],
reinterpret_cast<__half const &>(rhs));
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = (rhs < lhs[i] ? rhs : lhs[i]);
}
#endif
return result;
}
};
template <int N>
struct maximum<Array<half_t, N>> {
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hmax2(lhs_ptr[i], rhs_ptr[i]);
}
if (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hmax(
a_residual_ptr[N - 1],
b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = (lhs[i] < rhs[i] ? rhs[i] : lhs[i]);
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(half_t const & lhs, Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 lhs_pair = __half2half2(reinterpret_cast<__half const &>(lhs));
__half2 const *rhs_ptr = reinterpret_cast<__half2 const *>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hmax2(lhs_pair, rhs_ptr[i]);
}
if (N % 2) {
__half const *b_residual_ptr = reinterpret_cast<__half const *>(&rhs);
__half d_residual = __hmax(
reinterpret_cast<__half const &>(lhs),
b_residual_ptr[N - 1]);
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = (lhs < rhs[i] ? rhs[i] : lhs);
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const & lhs, half_t const &rhs) const {
Array<half_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
__half2 *result_ptr = reinterpret_cast<__half2 *>(&result);
__half2 const *lhs_ptr = reinterpret_cast<__half2 const *>(&lhs);
__half2 rhs_pair = __half2half2(reinterpret_cast<__half const &>(rhs));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = __hmax2(lhs_ptr[i], rhs_pair);
}
if (N % 2) {
__half const *a_residual_ptr = reinterpret_cast<__half const *>(&lhs);
__half d_residual = __hmax(
a_residual_ptr[N - 1],
reinterpret_cast<__half const &>(rhs));
result[N - 1] = reinterpret_cast<half_t const &>(d_residual);
}
#else
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = (lhs[i] < rhs ? rhs : lhs[i]);
}
#endif
return result;
}
};
/// Fused multiply-add
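/// On SM80+ the specialization below packs pairs of bfloat16_t operands and issues
/// fma.rn.bf16x2 inline PTX, using a scalar fma.rn.bf16 for the odd trailing element;
/// other targets and host code fall back to an elementwise multiply_add<bfloat16_t> loop.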
template <int N>
struct multiply_add<Array<bfloat16_t, N>, Array<bfloat16_t, N>, Array<bfloat16_t, N>> {
CUTLASS_HOST_DEVICE
Array<bfloat16_t, N> operator()(
Array<bfloat16_t, N> const &a,
Array<bfloat16_t, N> const &b,
Array<bfloat16_t, N> const &c) const {
Array<bfloat16_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
unsigned *result_ptr = reinterpret_cast<unsigned *>(&result);
unsigned const *a_ptr = reinterpret_cast<unsigned const *>(&a);
unsigned const *b_ptr = reinterpret_cast<unsigned const *>(&b);
unsigned const *c_ptr = reinterpret_cast<unsigned const *>(&c);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
asm ("fma.rn.bf16x2 %0, %1, %2, %3;\n"
: "=r"(result_ptr[i])
: "r"(a_ptr[i]), "r"(b_ptr[i]), "r"(c_ptr[i])
);
}
if (N % 2) {
uint16_t *result_ptr = reinterpret_cast<uint16_t *>(&result);
uint16_t const *a_residual_ptr = reinterpret_cast<uint16_t const *>(&a);
uint16_t const *b_residual_ptr = reinterpret_cast<uint16_t const *>(&b);
uint16_t const *c_residual_ptr = reinterpret_cast<uint16_t const *>(&c);
asm ("fma.rn.bf16 %0, %1, %2, %3;\n"
: "=h"(result_ptr[N - 1])
: "h"(a_residual_ptr[N - 1]), "h"(b_residual_ptr[N - 1]), "h"(c_residual_ptr[N - 1])
);
}
#else
multiply_add<bfloat16_t> op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = op(a[i], b[i], c[i]);
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<bfloat16_t, N> operator()(
bfloat16_t const &a,
Array<bfloat16_t, N> const &b,
Array<bfloat16_t, N> const &c) const {
Array<bfloat16_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
unsigned *result_ptr = reinterpret_cast<unsigned *>(&result);
unsigned const *b_ptr = reinterpret_cast<unsigned const *>(&b);
unsigned const *c_ptr = reinterpret_cast<unsigned const *>(&c);
unsigned a_packed = static_cast<unsigned>(a.raw());
a_packed = (a_packed | (a_packed << 16));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
asm ("fma.rn.bf16x2 %0, %1, %2, %3;\n"
: "=r"(result_ptr[i])
: "r"(a_packed), "r"(b_ptr[i]), "r"(c_ptr[i])
);
}
if (N % 2) {
uint16_t *result_ptr = reinterpret_cast<uint16_t *>(&result);
uint16_t const *a_residual_ptr = reinterpret_cast<uint16_t const *>(&a);
uint16_t const *b_residual_ptr = reinterpret_cast<uint16_t const *>(&b);
uint16_t const *c_residual_ptr = reinterpret_cast<uint16_t const *>(&c);
asm ("fma.rn.bf16 %0, %1, %2, %3;\n"
: "=h"(result_ptr[N - 1])
: "h"(a_residual_ptr[0]), "h"(b_residual_ptr[N - 1]), "h"(c_residual_ptr[N - 1])
);
}
#else
multiply_add<bfloat16_t> op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = op(a, b[i], c[i]);
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<bfloat16_t, N> operator()(
Array<bfloat16_t, N> const &a,
bfloat16_t const &b,
Array<bfloat16_t, N> const &c) const {
Array<bfloat16_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
unsigned *result_ptr = reinterpret_cast<unsigned *>(&result);
unsigned const *a_ptr = reinterpret_cast<unsigned const *>(&a);
unsigned const *c_ptr = reinterpret_cast<unsigned const *>(&c);
unsigned b_packed = static_cast<unsigned>(b.raw());
b_packed = (b_packed | (b_packed << 16));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
asm ("fma.rn.bf16x2 %0, %1, %2, %3;\n"
: "=r"(result_ptr[i])
: "r"(a_ptr[i]), "r"(b_packed), "r"(c_ptr[i])
);
}
if (N % 2) {
uint16_t *result_ptr = reinterpret_cast<uint16_t *>(&result);
uint16_t const *a_residual_ptr = reinterpret_cast<uint16_t const *>(&a);
uint16_t const *b_residual_ptr = reinterpret_cast<uint16_t const *>(&b);
uint16_t const *c_residual_ptr = reinterpret_cast<uint16_t const *>(&c);
asm ("fma.rn.bf16 %0, %1, %2, %3;\n"
: "=h"(result_ptr[N - 1])
: "h"(a_residual_ptr[N - 1]), "h"(b_residual_ptr[0]), "h"(c_residual_ptr[N - 1])
);
}
#else
multiply_add<bfloat16_t> op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = op(a[i], b, c[i]);
}
#endif
return result;
}
CUTLASS_HOST_DEVICE
Array<bfloat16_t, N> operator()(
Array<bfloat16_t, N> const &a,
Array<bfloat16_t, N> const &b,
bfloat16_t const &c) const {
Array<bfloat16_t, N> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
unsigned *result_ptr = reinterpret_cast<unsigned *>(&result);
unsigned const *a_ptr = reinterpret_cast<unsigned const *>(&a);
unsigned const *b_ptr = reinterpret_cast<unsigned const *>(&b);
unsigned c_packed = static_cast<unsigned>(c.raw());
c_packed = (c_packed | (c_packed << 16));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
asm ("fma.rn.bf16x2 %0, %1, %2, %3;\n"
: "=r"(result_ptr[i])
: "r"(a_ptr[i]), "r"(b_ptr[i]), "r"(c_packed)
);
}
if (N % 2) {
uint16_t *result_ptr = reinterpret_cast<uint16_t *>(&result);
uint16_t const *a_residual_ptr = reinterpret_cast<uint16_t const *>(&a);
uint16_t const *b_residual_ptr = reinterpret_cast<uint16_t const *>(&b);
uint16_t const *c_residual_ptr = reinterpret_cast<uint16_t const *>(&c);
asm ("fma.rn.bf16 %0, %1, %2, %3;\n"
: "=h"(result_ptr[N - 1])
: "h"(a_residual_ptr[N - 1]), "h"(b_residual_ptr[N - 1]), "h"(c_residual_ptr[0])
);
}
#else
multiply_add<bfloat16_t> op;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
result[i] = op(a[i], b[i], c);
}
#endif
return result;
}
};
/// bit_and
template <int N>
struct bit_and<Array<uint1b_t, N>> {
CUTLASS_HOST_DEVICE
Array<uint1b_t, N> operator()(Array<uint1b_t, N> const &a, Array<uint1b_t, N> const &b) const {
using ArrayType = Array<uint1b_t, N>;
using Storage = typename ArrayType::Storage;
ArrayType result;
Storage *result_data = result.raw_data();
Storage const *a_data = a.raw_data();
Storage const *b_data = b.raw_data();
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ArrayType::kStorageElements; ++i) {
result_data[i] = (a_data[i] & b_data[i]);
}
return result;
}
};
/// bit_or
template <int N>
struct bit_or<Array<uint1b_t, N>> {
CUTLASS_HOST_DEVICE
Array<uint1b_t, N> operator()(Array<uint1b_t, N> const &a, Array<uint1b_t, N> const &b) const {
using ArrayType = Array<uint1b_t, N>;
using Storage = typename ArrayType::Storage;
ArrayType result;
Storage *result_data = result.raw_data();
Storage const *a_data = a.raw_data();
Storage const *b_data = b.raw_data();
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ArrayType::kStorageElements; ++i) {
result_data[i] = (a_data[i] | b_data[i]);
}
return result;
}
};
/// bit_not
template <int N>
struct bit_not<Array<uint1b_t, N>> {
CUTLASS_HOST_DEVICE
Array<uint1b_t, N> operator()(Array<uint1b_t, N> const &a) const {
using ArrayType = Array<uint1b_t, N>;
using Storage = typename ArrayType::Storage;
ArrayType result;
Storage *result_data = result.raw_data();
Storage const *a_data = a.raw_data();
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ArrayType::kStorageElements; ++i) {
result_data[i] = (~a_data[i]);
}
return result;
}
};
/// bit_xor
template <int N>
struct bit_xor<Array<uint1b_t, N>> {
CUTLASS_HOST_DEVICE
Array<uint1b_t, N> operator()(Array<uint1b_t, N> const &a, Array<uint1b_t, N> const &b) const {
using ArrayType = Array<uint1b_t, N>;
using Storage = typename ArrayType::Storage;
ArrayType result;
Storage *result_data = result.raw_data();
Storage const *a_data = a.raw_data();
Storage const *b_data = b.raw_data();
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < ArrayType::kStorageElements; ++i) {
result_data[i] = (a_data[i] ^ b_data[i]);
}
return result;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Operator overloads
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator+(Array<T, N> const &lhs, Array<T, N> const &rhs) {
plus<Array<T, N>> op;
return op(lhs, rhs);
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator-(Array<T, N> const &lhs, Array<T, N> const &rhs) {
minus<Array<T, N>> op;
return op(lhs, rhs);
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator-(Array<T, N> const &lhs) {
negate<Array<T, N>> op;
return op(lhs);
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator*(Array<T, N> const &lhs, Array<T, N> const &rhs) {
multiplies<Array<T, N>> op;
return op(lhs, rhs);
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator*(T lhs, Array<T, N> const &rhs) {
multiplies<Array<T, N>> op;
return op(lhs, rhs);
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator*(Array<T, N> const &lhs, T rhs) {
multiplies<Array<T, N>> op;
return op(lhs, rhs);
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> operator/(Array<T, N> const &lhs, Array<T, N> const &rhs) {
divides<Array<T, N>> op;
return op(lhs, rhs);
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> fma(Array<T, N> const &a, Array<T, N> const &b, Array<T, N> const &c) {
multiply_add<Array<T, N>> op;
return op(a, b, c);
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> fma(T a, Array<T, N> const &b, Array<T, N> const &c) {
multiply_add<Array<T, N>> op;
return op(a, b, c);
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> fma(Array<T, N> const &a, T b, Array<T, N> const &c) {
multiply_add<Array<T, N>> op;
return op(a, b, c);
}
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> fma(Array<T, N> const &a, Array<T, N> const &b, T c) {
multiply_add<Array<T, N>> op;
return op(a, b, c);
}
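// Example usage (illustrative sketch) of the operator overloads and fma() above:
//
//   Array<float, 8> a, b, c;
//   a.fill(1.0f); b.fill(2.0f); c.fill(0.5f);
//   Array<float, 8> s = a + b;          // elementwise plus<Array<float, 8>>
//   Array<float, 8> d = fma(a, b, c);   // d[i] = a[i] * b[i] + c[i]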
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/array_subbyte.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
// AlignedArray
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Aligned array type
template <
/// Element type
typename T,
/// Number of elements in the array
int N,
/// Alignment requirement in bytes
int Alignment = sizeof_bits<T>::value * N / 8
>
class alignas(Alignment) AlignedArray: public Array<T, N> {
public:
};
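// Example usage (illustrative sketch): with the default Alignment the array is aligned
// to its full data width, which permits vectorized 128-bit accesses for common fragment
// sizes, e.g.
//
//   AlignedArray<half_t, 8> frag;   // 8 x 16 bits = 16 bytes => alignas(16)
//   static_assert(alignof(AlignedArray<half_t, 8>) == 16, "");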
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| 62,373 | C | 24.584085 | 118 | 0.558399 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/bfloat16.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Defines a proxy class for storing non-standard 16-bit floating point values with
           8 bits of exponent and 7 bits of mantissa.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include "cutlass/floating_point_nvrtc.h"
#else
#include <cmath>
#include <limits>
#include <cstdint>
#include <cstring>
#endif
#include "cutlass/cutlass.h"
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Floating-point type with 8 bits of exponent and 7 bits of mantissa.
struct alignas(2) bfloat16_t {
//
// Data members
//
/// Storage type
uint16_t storage;
//
// Methods
//
/// Constructs from an unsigned short
CUTLASS_HOST_DEVICE
static bfloat16_t bitcast(uint16_t x) {
bfloat16_t h;
h.storage = x;
return h;
}
/// Default constructor
bfloat16_t() = default;
/// Floating-point conversion - round toward nearest
CUTLASS_HOST_DEVICE
explicit bfloat16_t(float x) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800) && (__CUDACC_VER_MAJOR__ >= 11)
asm("cvt.rn.bf16.f32 %0, %1;\n" : "=h"(storage) : "f"(x));
#else
uint32_t bits;
#if defined(__CUDA_ARCH__)
bits = reinterpret_cast<uint32_t &>(x);
#else
std::memcpy(&bits, &x, sizeof(bits));
#endif
if ((bits & 0x7f800000) != 0x7f800000) {
bool mantissa_bit = ((bits & (1 << 16)) != 0);
bool round_bit = ((bits & (1 << 15)) != 0);
bool sticky_bit = ((bits & ((1 << 15) - 1)) != 0);
if ((round_bit && sticky_bit) || (round_bit && mantissa_bit)) {
bits += uint32_t(1 << 16);
}
}
else if (bits & ~0xff800000) {
bits = 0x7fffffff;
}
storage = uint16_t((bits >> 16) & 0xffff);
#endif
}
/// Floating-point conversion - round toward nearest
CUTLASS_HOST_DEVICE
explicit bfloat16_t(double x): bfloat16_t(float(x)) {
}
  /// Integer conversion - rounds toward zero by truncating the low-order mantissa bits
CUTLASS_HOST_DEVICE
explicit bfloat16_t(int x) {
float flt = static_cast<float>(x);
uint32_t bits;
#if defined(__CUDA_ARCH__)
bits = reinterpret_cast<uint32_t &>(flt);
#else
std::memcpy(&bits, &flt, sizeof(bits));
#endif
storage = uint16_t(bits >> 16);
}
/// Converts to float
CUTLASS_HOST_DEVICE
operator float() const {
unsigned bits = (unsigned(storage) << 16);
#if defined(__CUDA_ARCH__)
return reinterpret_cast<float const &>(bits);
#else
float flt;
std::memcpy(&flt, &bits, sizeof(flt));
return flt;
#endif
}
  /// Converts to double
CUTLASS_HOST_DEVICE
explicit operator double() const {
return double(float(*this));
}
/// Converts to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
return int(float(*this));
}
/// Casts to bool
CUTLASS_HOST_DEVICE
explicit operator bool() const {
return (float(*this) != 0.0f);
}
/// Obtains raw bits
CUTLASS_HOST_DEVICE
uint16_t raw() const {
return storage;
}
/// Returns the sign bit
CUTLASS_HOST_DEVICE
bool signbit() const {
return ((raw() & 0x8000) != 0);
}
/// Returns the biased exponent
CUTLASS_HOST_DEVICE
int exponent_biased() const {
return int((raw() >> 7) & 0x0ff);
}
/// Returns the unbiased exponent
CUTLASS_HOST_DEVICE
int exponent() const {
return exponent_biased() - 127;
}
/// Returns the mantissa
CUTLASS_HOST_DEVICE
int mantissa() const {
return int(raw() & 0x7f);
}
};
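// Usage sketch (illustrative only): conversion from float keeps the high 16 bits of the
// IEEE-754 single-precision encoding with round-to-nearest, so exact round trips are only
// guaranteed for values representable in 8 significand bits.
//
//   bfloat16_t x(1.0f);                                            // stored as 0x3f80
//   bool same = (x.raw() == bfloat16_t::bitcast(0x3f80).raw());    // true
//   float back = float(x);                                         // 1.0f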
///////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
bool signbit(cutlass::bfloat16_t const& h) {
return h.signbit();
}
CUTLASS_HOST_DEVICE
cutlass::bfloat16_t abs(cutlass::bfloat16_t const& h) {
  return cutlass::bfloat16_t::bitcast(h.raw() & 0x7fff);  // clear the sign bit of the 16-bit encoding
}
CUTLASS_HOST_DEVICE
bool isnan(cutlass::bfloat16_t const& h) {
return (h.exponent_biased() == 0x0ff) && h.mantissa();
}
CUTLASS_HOST_DEVICE
bool isfinite(cutlass::bfloat16_t const& h) {
return (h.exponent_biased() != 0x0ff);
}
CUTLASS_HOST_DEVICE
cutlass::bfloat16_t nan_bf16(const char*) {
// NVIDIA canonical NaN
return cutlass::bfloat16_t::bitcast(0x7fff);
}
CUTLASS_HOST_DEVICE
bool isinf(cutlass::bfloat16_t const& h) {
return (h.exponent_biased() == 0x0ff) && !h.mantissa();
}
CUTLASS_HOST_DEVICE
bool isnormal(cutlass::bfloat16_t const& h) {
return h.exponent_biased() && h.exponent_biased() != 0x0ff;
}
CUTLASS_HOST_DEVICE
int fpclassify(cutlass::bfloat16_t const& h) {
int exp = h.exponent_biased();
int mantissa = h.mantissa();
if (exp == 0x0ff) {
if (mantissa) {
return FP_NAN;
}
else {
return FP_INFINITE;
}
}
else if (!exp) {
if (mantissa) {
return FP_SUBNORMAL;
}
else {
return FP_ZERO;
}
}
return FP_NORMAL;
}
CUTLASS_HOST_DEVICE
cutlass::bfloat16_t sqrt(cutlass::bfloat16_t const& h) {
#if defined(__CUDACC_RTC__)
return cutlass::bfloat16_t(sqrtf(float(h)));
#else
return cutlass::bfloat16_t(std::sqrt(float(h)));
#endif
}
CUTLASS_HOST_DEVICE
bfloat16_t copysign(bfloat16_t const& a, bfloat16_t const& b) {
uint16_t a_bits;
uint16_t b_bits;
#if defined(__CUDA_ARCH__)
a_bits = reinterpret_cast<uint16_t const &>(a);
b_bits = reinterpret_cast<uint16_t const &>(b);
#else
std::memcpy(&a_bits, &a, sizeof(a_bits));
std::memcpy(&b_bits, &b, sizeof(b_bits));
#endif
uint16_t a_mag = (a_bits & 0x7fff);
uint16_t b_sign = (b_bits & 0x8000);
uint16_t result = (a_mag | b_sign);
return bfloat16_t::bitcast(result);
}
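// Usage sketch (illustrative only; variable names are placeholders): the helpers above mirror
// their <cmath> counterparts for bfloat16_t.
//
//   bfloat16_t v(-2.5f);
//   bool is_negative = signbit(v);                   // true
//   bfloat16_t magnitude = abs(v);                   // +2.5
//   bool usable = isfinite(v) && !isnan(v);          // true
//   bfloat16_t restored = copysign(magnitude, v);    // -2.5 again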
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Standard Library operations and definitions
//
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace std {
#if !defined(__CUDACC_RTC__)
/// Numeric limits
template <>
struct numeric_limits<cutlass::bfloat16_t> {
static bool const is_specialized = true;
static bool const is_signed = true;
static bool const is_integer = false;
static bool const is_exact = false;
static bool const has_infinity = true;
static bool const has_quiet_NaN = true;
static bool const has_signaling_NaN = false;
static std::float_denorm_style const has_denorm = std::denorm_present;
static bool const has_denorm_loss = true;
static std::float_round_style const round_style = std::round_to_nearest;
static bool const is_iec559 = false;
static bool const is_bounded = true;
static bool const is_modulo = false;
static int const digits = 7;
/// Least positive value
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t min() { return cutlass::bfloat16_t::bitcast(0x01); }
/// Minimum finite value
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t lowest() { return cutlass::bfloat16_t::bitcast(0xff7f); }
/// Maximum finite value
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t max() { return cutlass::bfloat16_t::bitcast(0x7f7f); }
  /// Returns the machine epsilon, the difference between 1 and the next representable value
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t epsilon() { return cutlass::bfloat16_t::bitcast(0x1000); }
  /// Returns the maximum rounding error (0.5 for round-to-nearest)
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t round_error() { return cutlass::bfloat16_t(0.5f); }
  /// Returns positive infinity
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t infinity() { return cutlass::bfloat16_t::bitcast(0x7f80); }
  /// Returns a quiet NaN
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t quiet_NaN() { return cutlass::bfloat16_t::bitcast(0x7fff); }
  /// Returns a signaling NaN (same canonical encoding as the quiet NaN)
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t signaling_NaN() { return cutlass::bfloat16_t::bitcast(0x7fff); }
  /// Returns the smallest positive subnormal value
CUTLASS_HOST_DEVICE
static cutlass::bfloat16_t denorm_min() { return cutlass::bfloat16_t::bitcast(0x1); }
};
#endif
} // namespace std
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Arithmetic operators
//
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
bool operator==(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return float(lhs) == float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator!=(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return float(lhs) != float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator<(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return float(lhs) < float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator<=(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return float(lhs) <= float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator>(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return float(lhs) > float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator>=(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return float(lhs) >= float(rhs);
}
CUTLASS_HOST_DEVICE
bfloat16_t operator+(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return bfloat16_t(float(lhs) + float(rhs));
}
CUTLASS_HOST_DEVICE
bfloat16_t operator-(bfloat16_t const& lhs) {
return bfloat16_t(-float(lhs));
}
CUTLASS_HOST_DEVICE
bfloat16_t operator-(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return bfloat16_t(float(lhs) - float(rhs));
}
CUTLASS_HOST_DEVICE
bfloat16_t operator*(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return bfloat16_t(float(lhs) * float(rhs));
}
CUTLASS_HOST_DEVICE
bfloat16_t operator/(bfloat16_t const& lhs, bfloat16_t const& rhs) {
return bfloat16_t(float(lhs) / float(rhs));
}
CUTLASS_HOST_DEVICE
bfloat16_t& operator+=(bfloat16_t & lhs, bfloat16_t const& rhs) {
lhs = bfloat16_t(float(lhs) + float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
bfloat16_t& operator-=(bfloat16_t & lhs, bfloat16_t const& rhs) {
lhs = bfloat16_t(float(lhs) - float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
bfloat16_t& operator*=(bfloat16_t & lhs, bfloat16_t const& rhs) {
lhs = bfloat16_t(float(lhs) * float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
bfloat16_t& operator/=(bfloat16_t & lhs, bfloat16_t const& rhs) {
lhs = bfloat16_t(float(lhs) / float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
bfloat16_t& operator++(bfloat16_t & lhs) {
float tmp(lhs);
++tmp;
lhs = bfloat16_t(tmp);
return lhs;
}
CUTLASS_HOST_DEVICE
bfloat16_t& operator--(bfloat16_t & lhs) {
float tmp(lhs);
--tmp;
lhs = bfloat16_t(tmp);
return lhs;
}
CUTLASS_HOST_DEVICE
bfloat16_t operator++(bfloat16_t & lhs, int) {
bfloat16_t ret(lhs);
float tmp(lhs);
tmp++;
lhs = bfloat16_t(tmp);
return ret;
}
CUTLASS_HOST_DEVICE
bfloat16_t operator--(bfloat16_t & lhs, int) {
bfloat16_t ret(lhs);
float tmp(lhs);
tmp--;
lhs = bfloat16_t(tmp);
return ret;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// User-defined literals
//
CUTLASS_HOST_DEVICE
cutlass::bfloat16_t operator "" _bf16(long double x) {
return cutlass::bfloat16_t(float(x));
}
CUTLASS_HOST_DEVICE
cutlass::bfloat16_t operator "" _bf16(unsigned long long int x) {
return cutlass::bfloat16_t(int(x));
}
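// Usage sketch (illustrative only): the literals above allow concise bfloat16 constants.
//
//   cutlass::bfloat16_t scale = 0.5_bf16;       // constructed from a long double literal
//   cutlass::bfloat16_t count = 3_bf16;         // constructed from an integer literal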
/////////////////////////////////////////////////////////////////////////////////////////////////
| 13,371 | C | 25.690619 | 100 | 0.60878 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/aligned_buffer.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief AlignedBuffer is a container for trivially copyable elements suitable for use in
unions and shared memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Modifies semantics of cutlass::Array<> to provide guaranteed alignment.
template <
typename T,
int N,
int Align = 16
>
struct AlignedBuffer {
/// Internal storage type
using Storage = uint8_t;
/// Number of logical elements held in buffer
static int const kCount = N;
/// Alignment requirement in bytes
static int const kAlign = Align;
/// Number of storage elements
static int const kBytes =
(sizeof_bits<T>::value * N + 7) / 8;
private:
/// Internal storage
alignas(Align) Storage storage[kBytes];
public:
//
// C++ standard members
//
typedef T value_type;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef value_type *pointer;
typedef value_type const * const_pointer;
using Array = Array<T, N>;
using reference = typename Array::reference;
using const_reference = typename Array::const_reference;
public:
CUTLASS_HOST_DEVICE
pointer data() {
return reinterpret_cast<pointer>(storage);
}
CUTLASS_HOST_DEVICE
const_pointer data() const {
    return reinterpret_cast<const_pointer>(storage);
}
CUTLASS_HOST_DEVICE
Storage * raw_data() {
return storage;
}
CUTLASS_HOST_DEVICE
Storage const * raw_data() const {
return storage;
}
CUTLASS_HOST_DEVICE
constexpr bool empty() const {
return !kCount;
}
CUTLASS_HOST_DEVICE
constexpr size_type size() const {
return kCount;
}
CUTLASS_HOST_DEVICE
constexpr size_type max_size() const {
return kCount;
}
};
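// Usage sketch (illustrative only): AlignedBuffer is typically used to reserve shared memory
// for element types without a default constructor. Assumes a CUDA device context with
// cutlass/half.h available; the buffer name and element count are placeholders.
//
//   __shared__ AlignedBuffer<half_t, 1024> smem_buffer;
//   half_t *smem_ptr = smem_buffer.data();                // 16B-aligned pointer to half_t
//   static_assert(sizeof(smem_buffer) == 2 * 1024, "half_t occupies two bytes per element");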
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| 3,793 | C | 28.184615 | 100 | 0.649618 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/uint128.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Defines an unsigned 128b integer with several operators to support 64-bit integer division.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cstdint>
#else
#include <cstdint>
#include <cstdlib>
#include <cmath>
#include <type_traits>
#include <stdexcept>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Optionally enable GCC's built-in type
#if defined(__x86_64) && !defined(__CUDA_ARCH__) && defined(__GNUC__)
#define CUTLASS_UINT128_NATIVE
#elif defined(_MSC_VER) && defined(_M_AMD64) && !defined(__CUDA_ARCH__)
#define CUTLASS_INT128_ARITHMETIC
#include <intrin.h>
#if _MSC_VER >= 1920
#define CUTLASS_INT128_ARITHMETIC_DIV
#include <immintrin.h>
#endif
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
///! Unsigned 128b integer type
struct uint128_t {
/// Size of one part of the uint's storage in bits
  static int const kPartSize = sizeof_bits<uint64_t>::value;
struct hilo {
uint64_t lo;
uint64_t hi;
hilo() = default;
CUTLASS_HOST_DEVICE hilo(uint64_t lo_, uint64_t hi_):lo(lo_), hi(hi_) {}
};
  // Use a union to store either the low and high parts or, when available, a built-in 128b integer type.
union {
struct hilo hilo_;
#if defined(CUTLASS_UINT128_NATIVE)
unsigned __int128 native;
#endif // defined(CUTLASS_UINT128_NATIVE)
};
//
// Methods
//
/// Default ctor
uint128_t() = default;
/// Constructor from uint64
CUTLASS_HOST_DEVICE
uint128_t(uint64_t lo_): hilo_(lo_, 0) { }
/// Constructor from two 64b unsigned integers
CUTLASS_HOST_DEVICE
uint128_t(uint64_t lo_, uint64_t hi_): hilo_(lo_, hi_) {
}
/// Optional constructor from native value
#if defined(CUTLASS_UINT128_NATIVE)
uint128_t(unsigned __int128 value): native(value) { }
#endif
/// Lossily cast to uint64
CUTLASS_HOST_DEVICE
explicit operator uint64_t() const {
return hilo_.lo;
}
CUTLASS_HOST_DEVICE
static void exception() {
#if defined(__CUDA_ARCH__)
asm volatile (" brkpt;\n");
#else
// throw std::runtime_error("Not yet implemented.");
abort();
#endif
}
/// Add
CUTLASS_HOST_DEVICE
uint128_t operator+(uint128_t const &rhs) const {
uint128_t y;
#if defined(CUTLASS_UINT128_NATIVE)
y.native = native + rhs.native;
#else
y.hilo_.lo = hilo_.lo + rhs.hilo_.lo;
    y.hilo_.hi = hilo_.hi + rhs.hilo_.hi + (y.hilo_.lo < hilo_.lo);  // carry out of the low 64 bits
#endif
return y;
}
/// Subtract
CUTLASS_HOST_DEVICE
uint128_t operator-(uint128_t const &rhs) const {
uint128_t y;
#if defined(CUTLASS_UINT128_NATIVE)
y.native = native - rhs.native;
#else
y.hilo_.lo = hilo_.lo - rhs.hilo_.lo;
y.hilo_.hi = hilo_.hi - rhs.hilo_.hi - (rhs.hilo_.lo && y.hilo_.lo > hilo_.lo);
#endif
return y;
}
/// Multiply by unsigned 64b integer yielding 128b integer
CUTLASS_HOST_DEVICE
uint128_t operator*(uint64_t const &rhs) const {
uint128_t y;
#if defined(CUTLASS_UINT128_NATIVE)
y.native = native * rhs;
#elif defined(CUTLASS_INT128_ARITHMETIC)
// Multiply by the low part
y.hilo_.lo = _umul128(hilo_.lo, rhs, &y.hilo_.hi);
// Add the high part and ignore the overflow
uint64_t overflow;
y.hilo_.hi += _umul128(hilo_.hi, rhs, &overflow);
#else
// TODO - not implemented
CUTLASS_UNUSED(rhs);
exception();
#endif
return y;
}
  /// Divides a 128b value by a 64b divisor, yielding a 64b quotient
CUTLASS_HOST_DEVICE
uint64_t operator/(uint64_t const &divisor) const {
uint64_t quotient = 0;
#if defined(CUTLASS_UINT128_NATIVE)
quotient = uint64_t(native / divisor);
#elif defined(CUTLASS_INT128_ARITHMETIC_DIV)
// implemented using MSVC's arithmetic intrinsics
uint64_t remainder = 0;
quotient = _udiv128(hilo_.hi, hilo_.lo, divisor, &remainder);
#else
// TODO - not implemented
CUTLASS_UNUSED(divisor);
exception();
#endif
return quotient;
}
  /// Divides a 128b value by a 64b divisor, yielding the 64b remainder
CUTLASS_HOST_DEVICE
uint64_t operator%(uint64_t const &divisor) const {
uint64_t remainder = 0;
#if defined(CUTLASS_UINT128_NATIVE)
remainder = uint64_t(native % divisor);
#elif defined(CUTLASS_INT128_ARITHMETIC_DIV)
// implemented using MSVC's arithmetic intrinsics
(void)_udiv128(hilo_.hi, hilo_.lo, divisor, &remainder);
#else
// TODO - not implemented
CUTLASS_UNUSED(divisor);
exception();
#endif
return remainder;
}
/// Computes the quotient and remainder in a single method.
CUTLASS_HOST_DEVICE
uint64_t divmod(uint64_t &remainder, uint64_t divisor) const {
uint64_t quotient = 0;
#if defined(CUTLASS_UINT128_NATIVE)
quotient = uint64_t(native / divisor);
remainder = uint64_t(native % divisor);
#elif defined(CUTLASS_INT128_ARITHMETIC_DIV)
// implemented using MSVC's arithmetic intrinsics
quotient = _udiv128(hilo_.hi, hilo_.lo, divisor, &remainder);
#else
// TODO - not implemented
CUTLASS_UNUSED(remainder);
CUTLASS_UNUSED(divisor);
exception();
#endif
return quotient;
}
/// Left-shifts a 128b unsigned integer
CUTLASS_HOST_DEVICE
uint128_t operator<<(int sh) const {
if (sh == 0) {
return *this;
}
else if (sh >= kPartSize) {
return uint128_t(0, hilo_.lo << (sh - kPartSize));
}
else {
return uint128_t(
(hilo_.lo << sh),
(hilo_.hi << sh) | uint64_t(hilo_.lo >> (kPartSize - sh))
);
}
}
/// Right-shifts a 128b unsigned integer
CUTLASS_HOST_DEVICE
uint128_t operator>>(int sh) const {
if (sh == 0) {
return *this;
}
else if (sh >= kPartSize) {
return uint128_t((hilo_.hi >> (sh - kPartSize)), 0);
}
else {
return uint128_t(
(hilo_.lo >> sh) | (hilo_.hi << (kPartSize - sh)),
(hilo_.hi >> sh)
);
}
}
};
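// Usage sketch (illustrative only): uint128_t provides just enough arithmetic for the fast
// integer division used in offset and grid computations. Values are placeholders; divmod()
// requires either the native __int128 path or the MSVC intrinsic path.
//
//   uint128_t product = uint128_t(0x8000000000000000ull) * uint64_t(4);   // 2^65
//   uint64_t remainder = 0;
//   uint64_t quotient = product.divmod(remainder, uint64_t(3));
//   uint64_t low = uint64_t(product);                                     // lossy narrowing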
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 8,139 | C | 28.81685 | 100 | 0.616538 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/array_planar_complex.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Array type holding planar complex fragments as separate real and imaginary parts.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Array holding planar complex elements
template <typename Element_, int N>
struct ArrayPlanarComplex {
/// Underlying real element
using Element = Element_;
/// Number of logical elements
static size_t const kElements = N;
  /// Underlying fragment of real-valued elements
using ArrayReal = Array<Element, N>;
public:
/// Fragment of real-valued elements representing the real part
ArrayReal real;
/// Fragment of real-valued elements representing the imaginary part
ArrayReal imag;
public:
/// Ctor
CUTLASS_HOST_DEVICE
ArrayPlanarComplex() { }
/// Ctor
CUTLASS_HOST_DEVICE
ArrayPlanarComplex(
ArrayReal const &real_,
ArrayReal const &imag_
):
real(real_), imag(imag_) { }
/// Sets the array to zero efficiently
CUTLASS_HOST_DEVICE
void clear() {
real.clear();
imag.clear();
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper to deduce template arguments
template <typename Element, int N>
CUTLASS_HOST_DEVICE
ArrayPlanarComplex<Element, N>
make_ArrayPlanarComplex(Array<Element, N> const &real, Array<Element, N> const &imag) {
return ArrayPlanarComplex<Element, N>(real, imag);
}
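// Usage sketch (illustrative only; variable names are placeholders): planar complex fragments
// keep the real and imaginary planes in separate Array<> members, matching the data layout
// consumed by planar complex GEMM specializations.
//
//   Array<float, 8> real_part, imag_part;
//   real_part.fill(1.0f);
//   imag_part.clear();
//   ArrayPlanarComplex<float, 8> fragment = make_ArrayPlanarComplex(real_part, imag_part);
//   fragment.clear();                       // zeros both planes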
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 3,662 | C | 34.221154 | 100 | 0.604588 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/coord.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief A Coord is a coordinate of arbitrary rank into a tensor or matrix
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cstdint>
#else
#include <stdint.h>
#endif
#include "cutlass/cutlass.h"
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Statically-sized array specifying Coords within a tensor
template <
int Rank_, ///< Logical rank of coordinate
typename Index_ = int, ///< Index type used for each dimension
typename LongIndex_ = int64_t ///< Long index type used for linear offsets
>
struct Coord {
public:
//
// Type and constant definitions
//
/// Number of elements in Coord
static int const kRank = Rank_;
/// Index type used to store elements
using Index = Index_;
/// Type used to represent linear offsets
using LongIndex = LongIndex_;
private:
//
// Data members
//
/// Indices
Index idx[kRank];
public:
//
// Methods
//
/// Default ctor initializes uniformly
CUTLASS_HOST_DEVICE
explicit Coord(Index value = Index(0)) {
for (int i = 0; i < kRank; ++i) {
idx[i] = value;
}
}
/// Constructs from an array of integers
CUTLASS_HOST_DEVICE
Coord(Index const (&_idx)[kRank]) {
for (int i = 0; i < kRank; ++i) {
idx[i] = _idx[i];
}
}
/// Constructs from some other Coord
template <int R, typename I, typename L>
CUTLASS_HOST_DEVICE
Coord(Coord<R, I, L> other) {
for (int i = 0; i < kRank; ++i) {
idx[i] = other[i];
}
}
/// Returns a slice of the Coord which may be larger or smaller in rank
/// than this.
template <int Slice>
CUTLASS_HOST_DEVICE
Coord<Slice, Index, LongIndex> slice(int start = 0, Index identity = 0) const {
Coord<Slice, Index, LongIndex> result;
for (int i = 0; i < Slice; ++i) {
if (i + start < kRank) {
result[i] = idx[i + start];
}
else {
result[i] = identity;
}
}
return result;
}
/// Returns the index of the dimension with least value
CUTLASS_HOST_DEVICE
int min_dim_index() const {
int i = 0;
for (int j = 1; j < kRank; ++j) {
if (idx[j] < idx[i]) {
i = j;
}
}
return i;
}
/// Returns the index of the dimension with greatest value
CUTLASS_HOST_DEVICE
int max_dim_index() const {
int i = 0;
for (int j = 1; j < kRank; ++j) {
if (idx[j] > idx[i]) {
i = j;
}
}
return i;
}
/// Returns true if Coord is non-zero.
CUTLASS_HOST_DEVICE
explicit operator bool() const {
for (int i = 0; i < kRank; ++i) {
if (idx[i]) {
return true;
}
}
return false;
}
/// Returns true if Coord is uniformly zero.
CUTLASS_HOST_DEVICE
bool operator!() const {
for (int i = 0; i < kRank; ++i) {
if (idx[i]) {
return false;
}
}
return true;
}
/// Element-wise addition
CUTLASS_HOST_DEVICE
Coord operator+(Coord const& b) const {
Coord c;
for (int i = 0; i < kRank; ++i) {
c.idx[i] = idx[i] + b.idx[i];
}
return c;
}
/// Element-wise subtraction
CUTLASS_HOST_DEVICE
Coord operator-(Coord const& b) const {
Coord c;
for (int i = 0; i < kRank; ++i) {
c.idx[i] = idx[i] - b.idx[i];
}
return c;
}
/// Element-wise multiplication
CUTLASS_HOST_DEVICE
Coord operator*(Coord const& b) const {
Coord c;
for (int i = 0; i < kRank; ++i) {
c.idx[i] = idx[i] * b.idx[i];
}
return c;
}
/// Element-wise division
CUTLASS_HOST_DEVICE
Coord operator/(Coord const& b) const {
Coord c;
for (int i = 0; i < kRank; ++i) {
c.idx[i] = idx[i] / b.idx[i];
}
return c;
}
/// In-place addition
CUTLASS_HOST_DEVICE
Coord& operator+=(Coord const& b) {
for (int i = 0; i < kRank; ++i) {
idx[i] += b.idx[i];
}
return *this;
}
/// In-place subtraction
CUTLASS_HOST_DEVICE
Coord& operator-=(Coord const& b) {
for (int i = 0; i < kRank; ++i) {
idx[i] -= b.idx[i];
}
return *this;
}
/// In-place multiplication
CUTLASS_HOST_DEVICE
Coord& operator*=(Coord const& b) {
for (int i = 0; i < kRank; ++i) {
idx[i] *= b.idx[i];
}
return *this;
}
/// In-place division
CUTLASS_HOST_DEVICE
Coord& operator/=(Coord const& b) {
for (int i = 0; i < kRank; ++i) {
idx[i] /= b.idx[i];
}
return *this;
}
/// Member access operator
CUTLASS_HOST_DEVICE Index& operator[](int dim) { return idx[dim]; }
/// Member access operator
CUTLASS_HOST_DEVICE Index const& operator[](int dim) const { return idx[dim]; }
  /// Computes the dot product with another Coord object
CUTLASS_HOST_DEVICE
LongIndex dot(Coord const& b, LongIndex sum = LongIndex(0)) const {
for (int i = 0; i < kRank; ++i) {
sum += idx[i] * b.idx[i];
}
return sum;
}
/// Gets the index of a given Coord element
template <int Dim>
CUTLASS_HOST_DEVICE Index& at() {
return idx[Dim];
}
/// Access via index; may limit unrolling potential
CUTLASS_HOST_DEVICE
Index& at(int dim) { return idx[dim]; }
/// Gets the index of a given Coord element
template <int Dim>
CUTLASS_HOST_DEVICE Index const& at() const {
return idx[Dim];
}
/// Access via index; may limit unrolling potential
CUTLASS_HOST_DEVICE
Index const& at(int dim) const { return idx[dim]; }
/// Determines if two Coord<> objects are equal
CUTLASS_HOST_DEVICE
bool operator==(Coord const& b) const {
bool equal = true;
for (int i = 0; equal && i < kRank; ++i) {
equal = (idx[i] == b.idx[i]);
}
return equal;
}
/// Not equal
CUTLASS_HOST_DEVICE
bool operator!=(Coord const& b) const { return !(*this == b); }
/// Clamps a coordinate to a range specified by maximum and minimum values
CUTLASS_HOST_DEVICE
Coord& clamp(Coord const& max, Coord const& min = Coord()) {
for (int i = 0; i < kRank; ++i) {
idx[i] = __NV_STD_MAX(__NV_STD_MIN(idx[i], max.idx[i]), min.idx[i]);
}
return *this;
}
/// Returns the sum of all elements
CUTLASS_HOST_DEVICE
Index sum() const {
Index sum_(idx[0]);
for (int i = 1; i < kRank; ++i) {
sum_ += idx[i];
}
return sum_;
}
/// Returns the product of all elements
CUTLASS_HOST_DEVICE
LongIndex product() const {
LongIndex product_(idx[0]);
for (int i = 1; i < kRank; ++i) {
product_ *= idx[i];
}
return product_;
}
/// Less than operator
CUTLASS_HOST_DEVICE
bool operator<(Coord const &b) const {
for (int i = 0; i < kRank; ++i) {
if (!(idx[i] < b[i])) {
return false;
}
}
return true;
}
/// Less than or equals operator
CUTLASS_HOST_DEVICE
bool operator<=(Coord const &b) const {
for (int i = 0; i < kRank; ++i) {
if (!(idx[i] <= b[i])) {
return false;
}
}
return true;
}
/// Greater than operator
CUTLASS_HOST_DEVICE
bool operator>(Coord const &b) const {
return !(*this <= b);
}
/// Greater than or equals operator
CUTLASS_HOST_DEVICE
bool operator>=(Coord const &b) const {
return !(*this < b);
}
};
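// Usage sketch (illustrative only): Coord<> behaves as a small fixed-size integer vector with
// element-wise arithmetic, comparisons, and a dot product for computing linear offsets.
//
//   int values[2] = {128, 64};
//   Coord<2> extent(values);                // rank-2 coordinate
//   Coord<2> stride(1);                     // uniform initialization: {1, 1}
//   stride[1] = 128;
//   int64_t offset = extent.dot(stride);    // 128 * 1 + 64 * 128 = 8320
//   bool inside = Coord<2>(16) < extent;    // element-wise comparison, true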
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
/// Scalar multiplication
template <int Rank, typename Index>
CUTLASS_HOST_DEVICE
Coord<Rank, Index> operator*(Index s, Coord<Rank, Index> coord) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Rank; ++i) {
coord[i] *= s;
}
return coord;
}
/// Scalar multiplication
template <int Rank, typename Index>
CUTLASS_HOST_DEVICE
Coord<Rank, Index> operator*(Coord<Rank, Index> coord, Index s) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Rank; ++i) {
coord[i] *= s;
}
return coord;
}
/// Scalar division
template <int Rank, typename Index>
CUTLASS_HOST_DEVICE
Coord<Rank, Index> operator/(Index s, Coord<Rank, Index> coord) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Rank; ++i) {
coord[i] = s / coord[i];
}
return coord;
}
/// Scalar division
template <int Rank, typename Index>
CUTLASS_HOST_DEVICE
Coord<Rank, Index> operator/(Coord<Rank, Index> coord, Index s) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Rank; ++i) {
coord[i] /= s;
}
return coord;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Integer-valued make_Coord
//
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper to make a 1-element coordinate
template <typename T>
CUTLASS_HOST_DEVICE
Coord<1, T> make_Coord(T _0) {
T values[1] = {_0};
return Coord<1, T>(values);
}
/// Helper to make a 2-element coordinate
template <typename T>
CUTLASS_HOST_DEVICE
Coord<2, T> make_Coord(T _0, T _1) {
T values[2] = {_0, _1};
return Coord<2, T>(values);
}
/// Helper to make a 3-element coordinate
template <typename T>
CUTLASS_HOST_DEVICE
Coord<3, T> make_Coord(T _0, T _1, T _2) {
T values[3] = {_0, _1, _2};
return Coord<3, T>(values);
}
/// Helper to make a 4-element coordinate
template <typename T>
CUTLASS_HOST_DEVICE
Coord<4, T> make_Coord(T _0, T _1, T _2, T _3) {
T values[4] = {_0, _1, _2, _3};
return Coord<4, T>(values);
}
/// Helper to make a 5-element coordinate
template <typename T>
CUTLASS_HOST_DEVICE
Coord<5, T> make_Coord(T _0, T _1, T _2, T _3, T _4) {
T values[5] = {_0, _1, _2, _3, _4};
return Coord<5, T>(values);
}
/// Helper to make an N-element coordinate from a single value, zero-padding the higher dimensions
template <int N, typename T>
CUTLASS_HOST_DEVICE
Coord<N, T>make_Coord_with_padding(T _0) {
Coord<N, T> coord;
CUTLASS_PRAGMA_UNROLL
for (int i = N - 1; i > 0; --i) {
coord[i] = 0;
}
coord[0] = _0;
return coord;
}
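// Usage sketch (illustrative only): the make_Coord helpers deduce the element type from their
// arguments, while make_Coord_with_padding places a single value in the fastest-varying
// dimension and zero-fills the remaining ranks.
//
//   Coord<3> shape = make_Coord(2, 16, 8);
//   Coord<4> padded = make_Coord_with_padding<4>(5);   // {5, 0, 0, 0}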
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| 11,827 | C | 23.590437 | 100 | 0.573856 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/semaphore.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implementation of a CTA-wide semaphore for inter-CTA synchronization.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/gemm/gemm.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// CTA-wide semaphore for inter-CTA synchronization.
class Semaphore {
public:
int *lock;
bool wait_thread;
int state;
public:
/// Implements a semaphore to wait for a flag to reach a given value
CUTLASS_HOST_DEVICE
Semaphore(int *lock_, int thread_id):
lock(lock_),
wait_thread(thread_id < 0 || thread_id == 0),
state(-1) {
}
/// Permit fetching the synchronization mechanism early
CUTLASS_DEVICE
void fetch() {
if (wait_thread) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
asm volatile ("ld.global.acquire.gpu.b32 %0, [%1];\n" : "=r"(state) : "l"(lock));
#else
asm volatile ("ld.global.cg.b32 %0, [%1];\n" : "=r"(state) : "l"(lock));
#endif
}
}
/// Gets the internal state
CUTLASS_DEVICE
int get_state() const {
return state;
}
/// Waits until the semaphore is equal to the given value
CUTLASS_DEVICE
void wait(int status = 0) {
#if defined(__NVCC__) || (defined(__clang__) && defined(__CUDA__)) || defined(__CUDACC_RTC__)
while( __syncthreads_and(state != status) ) {
fetch();
}
__syncthreads();
#endif
}
/// Updates the lock with the given result
CUTLASS_DEVICE
void release(int status = 0) {
#if defined(__NVCC__) || (defined(__clang__) && defined(__CUDA__)) || defined(__CUDACC_RTC__)
__syncthreads();
if (wait_thread) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
asm volatile ("st.global.release.gpu.b32 [%0], %1;\n" : : "l"(lock), "r"(status));
#else
asm volatile ("st.global.cg.b32 [%0], %1;\n" : : "l"(lock), "r"(status));
#endif
}
#endif
}
};
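// Usage sketch (illustrative only): a serial split-K style hand-off in which each CTA waits for
// the previous partial result before publishing its own. The workspace pointer and the slice
// index below are placeholders supplied by the caller.
//
//   Semaphore semaphore(workspace_lock_ptr, threadIdx.x);
//   semaphore.fetch();                        // start the read early to hide latency
//   // ... compute this CTA's partial result ...
//   semaphore.wait(slice_idx);                // block until it is this slice's turn
//   // ... accumulate into / store the shared output ...
//   semaphore.release(slice_idx + 1);         // unblock the next slice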
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 4,186 | C | 33.04065 | 100 | 0.581462 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/tensor_ref.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines a structure containing strides, bounds, and a pointer to tensor data.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
#include "cutlass/platform/platform.h"
#include "cutlass/subbyte_reference.h"
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Default layout function from coordinates in a tensor's index space into the n-D array held
/// in memory.
///
/// All layout functions must define at least the members shown in IdentityTensorLayout<>.
template <int Rank>
class IdentityTensorLayout {
public:
/// Logical rank of tensor
static int const kRank = Rank;
/// Rank of stride vector
static int const kStrideRank = Rank;
/// Index type used for coordinates
using Index = int32_t;
/// Long index type used for offsets
using LongIndex = int64_t;
/// Logical coordinate
using TensorCoord = Coord<kRank, Index>;
/// Stride vector
using Stride = Coord<kStrideRank, Index>;
private:
//
// Data members
//
/// Stride data member
Stride stride_;
public:
//
// Methods
//
CUTLASS_HOST_DEVICE
IdentityTensorLayout(Stride const &stride = Stride()): stride_(stride) { }
/// Returns the offset of a coordinate in linear memory
CUTLASS_HOST_DEVICE
LongIndex operator()(Coord<Rank> const &coord) const {
return coord.dot(stride_);
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride stride() const {
return stride_;
}
/// Returns the stride of the layout
CUTLASS_HOST_DEVICE
Stride & stride() {
return stride_;
}
/// Compute the number of contiguous elements needed to store a tensor with the given size
CUTLASS_HOST_DEVICE
LongIndex capacity(TensorCoord const &size) const {
int idx = stride_.max_dim_index();
return stride_[idx] * size[idx];
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
/* \brief TensorRef is a template for objects pointing to the start of tensors of arbitrary rank
and layout within memory. A TensorRef combines a pointer and a Layout concept
Examples:
(These examples use helpers for matrix layouts defined in cutlass/layout/matrix.h)
1. Column-major matrix may be represented as a rank=2 tensor:
TensorRef<float, layout::ColumnMajor> A(ptr_A, ldm);
2. Row-major matrix may be represented as a rank=2 tensor:
       TensorRef<float, layout::RowMajor> B(ptr_B, ldm);
3. An interleaved matrix may be represented as a rank=2 tensor:
TensorRef<int8_t, layout::ColumnMajorInterleaved<32> > C;
4. A helper exists to define a TensorRef for a contiguous matrix whose layout
is not known at compile time.
int ldm; // leading dimension
layout::Matrix kind; // Could be layout::Matrix::kRowMajor or layout::Matrix::kColumnMajor
TensorRef<int, layout::ContiguousMatrix> E(ptr_E, {ldm, kind});
*/
template <
/// Data type of element stored within tensor (concept: NumericType)
typename Element_,
/// Defines a mapping from logical coordinate to linear memory (concept: Layout)
typename Layout_
>
class TensorRef {
public:
/// Data type of individual access
using Element = Element_;
/// Mapping function from logical coordinate to linear memory
using Layout = Layout_;
/// Reference type to an element
using Reference = typename platform::conditional<
sizeof_bits<Element>::value >= 8,
Element &,
SubbyteReference<Element>
>::type;
/// Logical rank of tensor index space
static int const kRank = Layout::kRank;
/// Index type
using Index = typename Layout::Index;
/// Long index used for pointer offsets
using LongIndex = typename Layout::LongIndex;
/// Coordinate in logical tensor space
using TensorCoord = typename Layout::TensorCoord;
/// Layout's stride vector
using Stride = typename Layout::Stride;
/// TensorRef to constant data
using ConstTensorRef = TensorRef<
typename platform::remove_const<Element>::type const,
Layout>;
/// TensorRef to non-constant data
using NonConstTensorRef = TensorRef<
typename platform::remove_const<Element>::type,
Layout>;
/// Require at least rank=1. Mathematically, a rank=0 tensor would be considered to be a
/// scalar, but degenerate cases such as these are difficult to accommodate without
/// extensive C++ metaprogramming or support for zero-length arrays.
static_assert(kRank > 0, "Cannot define a zero-rank TensorRef");
private:
/// Pointer
Element* ptr_;
/// Layout object maps logical coordinates to linear offsets
Layout layout_;
public:
//
// Methods
//
/// Constructs a TensorRef with a pointer and layout object.
CUTLASS_HOST_DEVICE
TensorRef(): ptr_(nullptr) {
}
/// Constructs a TensorRef with a pointer and layout object.
CUTLASS_HOST_DEVICE
TensorRef(
Element *ptr, ///< pointer to start of tensor
Layout const &layout ///< layout object containing stride and mapping function
):
ptr_(ptr), layout_(layout) {
}
/// Converting constructor from TensorRef to non-constant data.
template<typename _Magic = int>
CUTLASS_HOST_DEVICE
TensorRef(
NonConstTensorRef const &ref, ///< TensorRef to non-const data
///SFINAE trick to avoid creating a copy-constructor when Element_ is already non-const
_Magic magic = (typename platform::enable_if< ! platform::is_same<NonConstTensorRef, TensorRef<Element_, Layout_> >::value, _Magic>::type)0
):
ptr_(ref.data()), layout_(ref.layout()) { }
/// Returns a reference to constant-valued tensor.
CUTLASS_HOST_DEVICE
ConstTensorRef const_ref() const {
return ConstTensorRef(ptr_, layout_);
}
CUTLASS_HOST_DEVICE
NonConstTensorRef non_const_ref() const {
return NonConstTensorRef(const_cast<typename platform::remove_const<Element>::type *>(ptr_), layout_);
}
/// Updates only the pointer
CUTLASS_HOST_DEVICE
void reset(Element* ptr = nullptr) {
ptr_ = ptr;
}
/// Updates the pointer and layout object
CUTLASS_HOST_DEVICE
void reset(Element* ptr, Layout const &layout) {
ptr_ = ptr;
layout_ = layout;
}
/// Returns true if the TensorRef is non-null
CUTLASS_HOST_DEVICE
bool good() const {
return ptr_ != nullptr;
}
/// Returns the pointer to referenced data
CUTLASS_HOST_DEVICE
Element * data() const { return ptr_; }
/// Returns a reference to the element at a given linear index
CUTLASS_HOST_DEVICE
Reference data(LongIndex idx) const {
return ReferenceFactory<typename platform::remove_const<Element>::type,
(sizeof_bits<Element>::value < 8)>::get(ptr_, idx);
}
/// Returns the layout object
CUTLASS_HOST_DEVICE
Layout & layout() {
return layout_;
}
/// Returns the layout object
CUTLASS_HOST_DEVICE
Layout layout() const {
return layout_;
}
/// Returns the layout object's stride vector
CUTLASS_HOST_DEVICE
Stride stride() const {
return layout_.stride();
}
/// Returns the layout object's stride vector
CUTLASS_HOST_DEVICE
Stride & stride() {
return layout_.stride();
}
/// Returns the layout object's stride in a given physical dimension
CUTLASS_HOST_DEVICE
typename Layout::Stride::Index stride(int dim) const {
return layout_.stride().at(dim);
}
/// Returns the layout object's stride in a given physical dimension
CUTLASS_HOST_DEVICE
typename Layout::Stride::Index & stride(int dim) {
return layout_.stride().at(dim);
}
/// Computes the offset of an index from the origin of the tensor
CUTLASS_HOST_DEVICE
LongIndex offset(TensorCoord const& coord) const {
return layout_(coord);
}
/// Returns a reference to the element at a given Coord
CUTLASS_HOST_DEVICE
Reference at(TensorCoord const& coord) const {
return data(offset(coord));
}
/// Returns a reference to the element at a given Coord
CUTLASS_HOST_DEVICE
Reference operator[](TensorCoord const& coord) const {
return data(offset(coord));
}
/// Adds an offset to each pointer
CUTLASS_HOST_DEVICE
TensorRef & add_pointer_offset(LongIndex offset_) {
ptr_ += offset_;
return *this;
}
/// Adds an offset to each pointer
CUTLASS_HOST_DEVICE
TensorRef & add_coord_offset(TensorCoord const &coord) {
add_pointer_offset(offset(coord));
return *this;
}
/// Returns a TensorRef offset by a given amount
CUTLASS_HOST_DEVICE
TensorRef operator+(TensorCoord const& b) const {
TensorRef result(*this);
result.add_coord_offset(b);
return result;
}
/// Returns a TensorRef offset by a given amount
CUTLASS_HOST_DEVICE
TensorRef & operator+=(TensorCoord const& b) {
add_coord_offset(b);
return *this;
}
/// Returns a TensorRef offset by a given amount
CUTLASS_HOST_DEVICE
TensorRef operator-(TensorCoord const& b) const {
TensorRef result(*this);
result.add_pointer_offset(-offset(b));
return result;
}
/// Returns a TensorRef offset by a given amount
CUTLASS_HOST_DEVICE
TensorRef & operator-=(TensorCoord const& b) {
add_pointer_offset(-offset(b));
return *this;
}
};
/// Constructs a TensorRef, deducing types from arguments.
template <
typename Element,
typename Layout
>
CUTLASS_HOST_DEVICE
TensorRef<Element, Layout> make_TensorRef(Element *ptr, Layout const &layout) {
return TensorRef<Element, Layout>(ptr, layout);
}
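// Usage sketch (illustrative only): constructing and indexing a rank-2 TensorRef. Assumes
// cutlass/layout/matrix.h is also included so that layout::ColumnMajor is available; the
// pointer, extents, and indices are placeholders.
//
//   int ldm = 128;
//   TensorRef<float, layout::ColumnMajor> ref = make_TensorRef(ptr_A, layout::ColumnMajor(ldm));
//   float value = ref.at({row, column});      // logical coordinate -> linear offset -> element
//   ref.add_coord_offset({0, 16});            // advance the origin by 16 columns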
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations to handle degenerate and sub-byte cases.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename Element,
typename Layout
>
CUTLASS_HOST_DEVICE
bool TensorRef_aligned(TensorRef<Element, Layout> const &ref, int alignment) {
int const kStrideRank = Layout::kStrideRank;
if (reinterpret_cast<uintptr_t>(ref.data()) % alignment) {
return false;
}
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kStrideRank; ++i) {
if (ref.stride(i) % alignment) {
return false;
}
}
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| 12,207 | C | 28.136038 | 143 | 0.659458 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/predicate_vector.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines container classes and iterators for managing a statically sized vector
of boolean predicates.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cassert>
#include <cuda/std/cstdint>
#else
#include <assert.h>
#include <stdint.h>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/platform/platform.h"
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
/*!@defgroup predicate_vector_concept Predicate Vector Concept
@{
Implementations of \ref predicate_vector_concept contain an ordered set of boolean predicates which
may be used as conditionals in other device-side operations. Both random access and iterators
offering sequential access are provided.
@par Predicate Vector
A \ref predicate_vector_concept satisfies the following expressions
- <b>at(int idx)</b> - returns the value of the indexed predicate
- <b>set(int idx, bool value)</b> - sets the value of the indexed predicate
- <b>begin()</b> - returns a \ref predicate_iterator_concept pointing to the first predicate
@}
*/
////////////////////////////////////////////////////////////////////////////////////////////////////
/*!@defgroup predicate_iterator_concept Predicate Iterator Concept
@{
Implementations of \ref predicate_iterator_concept enables accessing and traversing elements of a
bit vector.
@par Const Predicate Iterator
A const \ref predicate_iterator_concept satisfies the following expressions
- <b>++it</b> increments the iterator to the next predicate
- <b>*it</b> returns the value of the currently pointed-to predicate
@par Mutable Predicate Iterator
A \ref predicate_iterator_concept that is non-const <b>also</b> satisfies the following expressions
- <b>it.set(bool value)</b> sets the value of the currently pointed-to predicate
@}
*/
////////////////////////////////////////////////////////////////////////////////////////////////////
/*!@defgroup predicate_tile_adapter Predicate Tile Adapter Concept
@{
Implementations of \ref predicate_tile_adapter provide a mapping between the elements of a \ref
tile_traits_concept and a \ref predicate_vector_concept.
@par Predicate Tile Adapter
A \ref predicate_tile_adapter satisfies the following expressions
- <b>at(int d, int h, int w, int c)</b> - returns the value of a predicate corresponding to the
access (d, h, w, c) within the tile.
@}
*/
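// Usage sketch (illustrative only) of the predicate vector concept described above: guarding a
// set of accesses against an extent. The rank, coordinates, and extent are placeholders.
//
//   PredicateVector<8> predicates;
//   CUTLASS_PRAGMA_UNROLL
//   for (int i = 0; i < 8; ++i) {
//     predicates.set(i, access_coord[i] < extent[i]);   // enable only in-bounds accesses
//   }
//   if (predicates.at(0)) {
//     // perform the first guarded access
//   }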
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Statically sized array of bits implementing @concept{predicate_vector_concept}.
template <
    /// Number of predicates contained in predicate vector
int kPredicates_,
/// Number of predicates contained in each byte of internal storage
int kPredicatesPerByte_ = 4,
/// Location of first predicate within byte of internal storage
int kPredicateStart_ = 0>
struct PredicateVector {
/// Number of bits stored by the PredicateVector
static int const kPredicates = kPredicates_;
/// Number of bits stored within each byte of the predicate bit vector
static int const kPredicatesPerByte = kPredicatesPerByte_;
  /// First bit within each byte containing predicates
static int const kPredicateStart = kPredicateStart_;
// Make sure no one tries to put more than 8 bits in a byte :)
static_assert(kPredicatesPerByte <= 8, "kPredicatesPerByte must fit within an actual byte");
// Make sure the "offsetted" bits fit in one byte.
static_assert(kPredicateStart + kPredicatesPerByte <= 8,
"The offsetted predicates must fit within an actual byte.");
/// Storage type of individual elements
typedef uint32_t Storage;
/// Number of bytes needed
static int const kBytes = (kPredicates + kPredicatesPerByte - 1) / kPredicatesPerByte;
/// Number of storage elements needed
static int const kWordCount = (kBytes + int(sizeof(Storage)) - 1) / int(sizeof(Storage));
private:
//
// Data members
//
/// Words of bit vector
Storage storageData[kWordCount];
//
// Methods
//
/// Computes the word and bit corresponding to a logical predicate index
CUTLASS_HOST_DEVICE void computeStorageOffset(int &word, int &bit, int idx) const {
CUTLASS_ASSERT(idx < kPredicates);
int byte = (idx / kPredicatesPerByte);
int bit_offset = (idx % kPredicatesPerByte);
word = byte / sizeof(Storage);
int byte_offset = (byte % sizeof(Storage));
bit = byte_offset * 8 + bit_offset + kPredicateStart;
}
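  // Worked example (added for clarity) using the default template parameters
  // (kPredicatesPerByte = 4, kPredicateStart = 0, Storage = uint32_t):
  //   idx = 10  ->  byte = 2, bit_offset = 2, word = 0, byte_offset = 2, bit = 2 * 8 + 2 + 0 = 18
  //   idx = 17  ->  byte = 4, bit_offset = 1, word = 1, byte_offset = 0, bit = 0 * 8 + 1 + 0 = 1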
/// Accesses a given word with optional assertions
CUTLASS_HOST_DEVICE Storage &storage(int word) {
CUTLASS_ASSERT(word < kWordCount);
return storageData[word];
}
/// Accesses a given word with optional assertions
CUTLASS_HOST_DEVICE Storage const &storage(int word) const {
CUTLASS_ASSERT(word < kWordCount);
return storageData[word];
}
public:
//
// Iterator
//
/**
* @brief An iterator implementing \ref predicate_iterator_concept enabling sequential
* read and write access to predicates.
* @concept{predicate_iterator_concept}
*/
class Iterator {
/// Reference to PredicateVector instance
PredicateVector &vec_;
/// Index into PredicateVector
int bit_;
public:
/// Copy constructor
CUTLASS_HOST_DEVICE
Iterator(Iterator const &it) : vec_(it.vec_), bit_(it.bit_) {}
/// Constructs an iterator from a PredicateVector
CUTLASS_HOST_DEVICE
Iterator(PredicateVector &vec, int _start = 0) : vec_(vec), bit_(_start) {}
/// Pre-increment
CUTLASS_HOST_DEVICE
Iterator &operator++() {
++bit_;
return *this;
}
/// Increment
CUTLASS_HOST_DEVICE
Iterator &operator+=(int offset) {
bit_ += offset;
return *this;
}
/// Pre-decrement
CUTLASS_HOST_DEVICE
Iterator &operator--() {
--bit_;
return *this;
}
/// Decrement
CUTLASS_HOST_DEVICE
Iterator &operator-=(int offset) {
bit_ -= offset;
return *this;
}
/// Post-increment
CUTLASS_HOST_DEVICE
Iterator operator++(int) {
Iterator ret(*this);
ret.bit_++;
return ret;
}
/// Post-decrement
CUTLASS_HOST_DEVICE
Iterator operator--(int) {
Iterator ret(*this);
ret.bit_--;
return ret;
}
/// Iterator advances by some amount
CUTLASS_HOST_DEVICE
Iterator operator+(int offset) {
Iterator ret(*this);
ret.bit_ += offset;
return ret;
}
/// Iterator recedes by some amount
CUTLASS_HOST_DEVICE
Iterator operator-(int offset) {
    Iterator ret(*this);
ret.bit_ -= offset;
return ret;
}
/// Returns true if iterators point to the same bit
CUTLASS_HOST_DEVICE
bool operator==(Iterator const &it) const { return bit_ == it.bit_; }
  /// Returns true if iterators point to different bits
CUTLASS_HOST_DEVICE
bool operator!=(Iterator const &it) const { return bit_ != it.bit_; }
/// Gets the bit at the pointed to location
CUTLASS_HOST_DEVICE
bool get() { return vec_.at(bit_); }
/// Gets the bit at the pointed to location
CUTLASS_HOST_DEVICE
bool at() const { return vec_.at(bit_); }
/// Dereferences iterator
CUTLASS_HOST_DEVICE
bool operator*() const { return at(); }
/// Sets the bit at the pointed to location
CUTLASS_HOST_DEVICE
void set(bool value = true) { vec_.set(bit_, value); }
};
/**
* @brief An iterator implementing \ref predicate_iterator_concept enabling sequential
 * read access to predicates.
* @concept{predicate_iterator_concept}
*/
class ConstIterator {
/// Reference to PredicateVector instance
PredicateVector const &vec_;
/// Index into PredicateVector
int bit_;
public:
/// Copy constructor
CUTLASS_HOST_DEVICE
ConstIterator(ConstIterator const &it) : vec_(it.vec_), bit_(it.bit_) {}
/// Constructs an iterator from a PredicateVector
CUTLASS_HOST_DEVICE
ConstIterator(PredicateVector const &vec, int _start = 0) : vec_(vec), bit_(_start) {}
/// Pre-increment
CUTLASS_HOST_DEVICE
ConstIterator &operator++() {
++bit_;
return *this;
}
/// Increment
CUTLASS_HOST_DEVICE
ConstIterator &operator+=(int offset) {
bit_ += offset;
return *this;
}
/// Pre-decrement
CUTLASS_HOST_DEVICE
ConstIterator &operator--() {
--bit_;
return *this;
}
/// Decrement
CUTLASS_HOST_DEVICE
ConstIterator &operator-=(int offset) {
bit_ -= offset;
return *this;
}
/// Post-increment
CUTLASS_HOST_DEVICE
ConstIterator operator++(int) {
ConstIterator ret(*this);
ret.bit_++;
return ret;
}
/// Post-decrement
CUTLASS_HOST_DEVICE
ConstIterator operator--(int) {
ConstIterator ret(*this);
ret.bit_--;
return ret;
}
/// Iterator advances by some amount
CUTLASS_HOST_DEVICE
ConstIterator operator+(int offset) {
ConstIterator ret(*this);
ret.bit_ += offset;
return ret;
}
/// Iterator recedes by some amount
CUTLASS_HOST_DEVICE
ConstIterator operator-(int offset) {
ConstIterator ret(*this);
ret.bit_ -= offset;
return ret;
}
/// Returns true if iterators point to the same bit
CUTLASS_HOST_DEVICE
bool operator==(ConstIterator const &it) const { return bit_ == it.bit_; }
  /// Returns true if iterators point to different bits
CUTLASS_HOST_DEVICE
bool operator!=(ConstIterator const &it) const { return bit_ != it.bit_; }
/// Gets the bit at the pointed to location
CUTLASS_HOST_DEVICE
bool get() { return vec_.at(bit_); }
/// Gets the bit at the pointed to location
CUTLASS_HOST_DEVICE
bool at() const { return vec_.at(bit_); }
/// Dereferences iterator
CUTLASS_HOST_DEVICE
bool operator*() const { return at(); }
};
/// Iterator that always returns true
struct TrivialIterator {
/// Constructor
CUTLASS_HOST_DEVICE
TrivialIterator() {}
/// Copy constructor
CUTLASS_HOST_DEVICE
TrivialIterator(Iterator const &it) {}
/// Constructs an iterator from a PredicateVector
CUTLASS_HOST_DEVICE
TrivialIterator(PredicateVector const &_vec) {}
/// Pre-increment
CUTLASS_HOST_DEVICE
TrivialIterator &operator++() { return *this; }
/// Post-increment
CUTLASS_HOST_DEVICE
TrivialIterator operator++(int) { return *this; }
/// Dereferences iterator
CUTLASS_HOST_DEVICE
bool operator*() const { return true; }
};
public:
//
// Methods
//
/// Initialize the predicate vector
CUTLASS_HOST_DEVICE PredicateVector(bool value = true) { fill(value); }
/// Fills all predicates with a given value
CUTLASS_HOST_DEVICE void fill(bool value = true) {
Storage item = (value ? ~Storage(0) : Storage(0));
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kWordCount; ++i) {
storage(i) = item;
}
}
/// Clears all predicates
CUTLASS_HOST_DEVICE void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kWordCount; ++i) {
storage(i) = 0;
}
}
/// Sets all predicates to true
CUTLASS_HOST_DEVICE void enable() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kWordCount; ++i) {
storage(i) = ~Storage(0);
}
}
/// Accesses a bit within the predicate vector.
CUTLASS_HOST_DEVICE bool operator[](int idx) const { return at(idx); }
/// Accesses a bit within the predicate vector.
CUTLASS_HOST_DEVICE bool at(int idx) const {
int bit, word;
computeStorageOffset(word, bit, idx);
return ((storage(word) >> bit) & 1);
}
/// Set a bit within the predicate vector.
CUTLASS_HOST_DEVICE void set(int idx, bool value = true) {
int bit, word;
computeStorageOffset(word, bit, idx);
Storage disable_mask = (~(Storage(1) << bit));
Storage enable_mask = (Storage(value) << bit);
storage(word) = ((storage(word) & disable_mask) | enable_mask);
}
/// Computes the intersection of two identical predicate vectors.
CUTLASS_HOST_DEVICE PredicateVector &operator&=(PredicateVector const &predicates) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kWordCount; ++i) {
storage(i) = (storage(i) & predicates.storage(i));
}
return *this;
}
/// Computes the union of two identical predicate vectors.
CUTLASS_HOST_DEVICE PredicateVector &operator|=(PredicateVector const &predicates) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kWordCount; ++i) {
storage(i) = (storage(i) | predicates.storage(i));
}
return *this;
}
/// Returns true if entire predicate array is zero.
CUTLASS_HOST_DEVICE bool is_zero() const {
Storage mask(0);
for (int byte = 0; byte < sizeof(Storage); ++byte) {
Storage byte_mask = (((1 << kPredicatesPerByte) - 1) << kPredicateStart);
mask |= (byte_mask << (byte * 8));
}
uint32_t result = 0;
for (int word = 0; word < kWordCount; ++word) {
      result |= (storage(word) & mask);  // mask off bit positions that do not hold predicates
}
return result == 0;
}
/// Returns an iterator to the start of the bit vector
CUTLASS_DEVICE
Iterator begin() { return Iterator(*this); }
  /// Returns an iterator pointing past the last predicate
CUTLASS_DEVICE
Iterator end() { return Iterator(*this, kPredicates); }
  /// Returns a ConstIterator pointing to the first predicate
CUTLASS_DEVICE
ConstIterator const_begin() const { return ConstIterator(*this); }
  /// Returns a ConstIterator pointing past the last predicate
CUTLASS_DEVICE
ConstIterator const_end() const { return ConstIterator(*this, kPredicates); }
};
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| 15,565 | C | 28.649524 | 100 | 0.641311 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/blas3.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Basic include for CUTLASS BLAS3/HPC code.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/complex.h"
#include "cutlass/functional.h"
#include "cutlass/numeric_types.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Enumerated type describing the type of kernel (based on input or output matrices).
enum class BlasMode {
kGemm,
kSymmetric,
kHermitian,
kTriangular,
kInvalid
};
/// Enumerated type describing the fill mode for matrices for BLAS functions.
enum class FillMode {
kFull, /// The entire tensor is covered.
kLower, /// The 'lower' part of a tensor is covered including diagonal
  kUpper,       /// The 'upper' part of a tensor is covered including diagonal
kDiagonal, /// Only diagonal elements are covered.
kNone, /// No element is covered.
kInvalid
};
/// Enumerated type describing the diagonal property of matrices for BLAS functions.
enum class DiagType {
kNonUnit,
kUnit,
kZero, // Only used internally for computing SYMM/HEMM
kInvalid
};
/// Enumerated type describing the side dense matrix is in matrix equation for BLAS functions.
enum class SideMode {
kLeft,
kRight,
kInvalid
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines FillMode inversions
template <FillMode kFillMode>
struct InvertFillMode;
/// Invert FillMode lower to upper
template <>
struct InvertFillMode<FillMode::kLower> {
static FillMode const mode = FillMode::kUpper;
};
/// Invert FillMode upper to lower
template <>
struct InvertFillMode<FillMode::kUpper> {
static FillMode const mode = FillMode::kLower;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines SideMode inversions
template <SideMode kSideMode>
struct InvertSideMode;
/// Invert SideMode left to right
template <>
struct InvertSideMode<SideMode::kLeft> {
static SideMode const mode = SideMode::kRight;
};
/// Invert SideMode right to left
template <>
struct InvertSideMode<SideMode::kRight> {
static SideMode const mode = SideMode::kLeft;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines correct compare operation for Triangular matrix boundary
template <FillMode kFillMode, DiagType kDiagType = DiagType::kNonUnit>
struct TrMatrixCompareOp {
using Index = int32_t;
using Type = typename platform::conditional<
(kFillMode == FillMode::kLower),
greater_equal<Index>,
less_equal<Index>>::type;
};
template <FillMode kFillMode>
struct TrMatrixCompareOp <kFillMode, DiagType::kUnit> {
using Index = int32_t;
using Type = typename platform::conditional<
(kFillMode == FillMode::kLower),
greater_equal<Index>,
less_equal<Index>>::type;
};
template <FillMode kFillMode>
struct TrMatrixCompareOp <kFillMode, DiagType::kZero> {
using Index = int32_t;
using Type = typename platform::conditional<
(kFillMode == FillMode::kLower),
greater<Index>,
less<Index>>::type;
};
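// Illustrative sketch (not part of the original header): the selected comparison functor is
// presumably applied to (row, column) indices to decide whether an element lies inside the
// stored triangle. For example, with 'row' and 'column' as int32_t indices:
//
//   using Compare = TrMatrixCompareOp<FillMode::kLower>::Type;   // greater_equal<int32_t>
//   Compare compare_op;
//   bool inside = compare_op(row, column);   // true on or below the diagonal for kLower/kNonUnit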
////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns precision in terms of bits (based on datatype) to fill tensors with.
// Defaults to 5 bits of mantissa for TF32 and FP32 (with implicit round-offs).
// Also defines acceptable mantissa result variance/error.
template <typename Element>
struct MantissaInBits {
static int constexpr bits = 5;
static double constexpr error = 1.0e-7;
};
// Full precision is supported for FP64
template <>
struct MantissaInBits<double> {
static int constexpr bits = 30;
static double constexpr error = 1.0e-15;
};
template <>
struct MantissaInBits<cutlass::complex<double>> {
static int constexpr bits = 30;
static double constexpr error = 1.0e-15;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| 6,338 | C | 34.813559 | 100 | 0.599558 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/cutlass.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Basic include for CUTLASS.
*/
#pragma once
////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef CUTLASS_NAMESPACE
#define cutlass CUTLASS_NAMESPACE
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#define CUTLASS_UNUSED(expr) do { ; } while (&expr != &expr)
#if !defined(__CUDACC_RTC__)
#include <assert.h>
#if defined(_MSC_VER)
#define CUTLASS_NOT_IMPLEMENTED() assert(0 && __FUNCSIG__)
#else
#define CUTLASS_NOT_IMPLEMENTED() assert(0 && __PRETTY_FUNCTION__)
#endif
#else
#if defined(_MSC_VER)
#define CUTLASS_NOT_IMPLEMENTED() assert(0 && __FUNCSIG__)
#else
#define CUTLASS_NOT_IMPLEMENTED() assert(0 && __PRETTY_FUNCTION__)
#endif
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(__NVCC__) || (defined(__clang__) && defined(__CUDA__))
#define CUTLASS_HOST_DEVICE __forceinline__ __device__ __host__
#define CUTLASS_DEVICE __forceinline__ __device__
#elif defined(__CUDACC_RTC__)
#define CUTLASS_HOST_DEVICE __forceinline__ __device__
#define CUTLASS_DEVICE __forceinline__ __device__
#else
#define CUTLASS_HOST_DEVICE inline
#define CUTLASS_DEVICE inline
#endif
/// Status code returned by CUTLASS operations
enum class Status {
kSuccess, ///< Operation was successful.
  kErrorMisalignedOperand,  ///< Operands fail alignment requirements.
  kErrorInvalidDataType,  ///< Data type fails a requirement.
  kErrorInvalidLayout,  ///< Layout fails an alignment requirement.
kErrorInvalidProblem, ///< Specified problem size is not supported by operator.
kErrorNotSupported, ///< Operation is not supported on current device.
kErrorWorkspaceNull, ///< The given workspace is null when it is required to be non-null.
kErrorInternal, ///< An error within CUTLASS occurred.
kErrorArchMismatch, ///< CUTLASS runs on a device that it was not compiled for.
kErrorInsufficientDriver, ///< CUTLASS runs with a driver that is too old.
kErrorMemoryAllocation, ///< Kernel launch failed due to insufficient device memory.
kInvalid ///< Status is unspecified.
};
/// Convert cutlass status to status strings
CUTLASS_HOST_DEVICE
static char const* cutlassGetStatusString(cutlass::Status status) {
switch (status) {
case cutlass::Status::kSuccess:
return "Success";
case cutlass::Status::kErrorMisalignedOperand:
return "Error Misaligned Operand";
case cutlass::Status::kErrorInvalidDataType:
return "Error Invalid Data Type";
case cutlass::Status::kErrorInvalidLayout:
return "Error Invalid Layout";
case cutlass::Status::kErrorInvalidProblem:
return "Error Invalid Problem";
case cutlass::Status::kErrorNotSupported:
return "Error Not Supported";
case cutlass::Status::kErrorWorkspaceNull:
return "Error Workspace Null";
case cutlass::Status::kErrorInternal:
return "Error Internal";
case cutlass::Status::kErrorInsufficientDriver:
return "Error Insufficient Driver";
case cutlass::Status::kErrorArchMismatch:
return "Error Architecture Mismatch";
case cutlass::Status::kErrorMemoryAllocation:
return "Error Memory Allocation failed";
case cutlass::Status::kInvalid: break;
}
return "Invalid status";
}
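/// Example (illustrative sketch; 'gemm_op' and 'args' are hypothetical):
///
/// \code
/// cutlass::Status status = gemm_op(args);
/// if (status != cutlass::Status::kSuccess) {
///   printf("CUTLASS error: %s\n", cutlassGetStatusString(status));
/// }
/// \endcode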
////////////////////////////////////////////////////////////////////////////////////////////////////
#ifndef CUTLASS_CONV_UNIT_TEST_RIGOROUS_SIZE_ENABLED
#define CUTLASS_CONV_UNIT_TEST_RIGOROUS_SIZE_ENABLED 0
#endif
// CUDA 10.1 introduces the mma instruction
#if !defined(CUTLASS_ENABLE_TENSOR_CORE_MMA)
#define CUTLASS_ENABLE_TENSOR_CORE_MMA 0
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
#define CUTLASS_ASSERT(x) assert(x)
////////////////////////////////////////////////////////////////////////////////////////////////////
// CUTLASS_PRAGMA_(UNROLL|NO_UNROLL) optimization directives for the CUDA compiler.
#if defined(__CUDA_ARCH__)
#if defined(__CUDACC_RTC__) || (defined(__clang__) && defined(__CUDA__))
#define CUTLASS_PRAGMA_UNROLL _Pragma("unroll")
#define CUTLASS_PRAGMA_NO_UNROLL _Pragma("unroll 1")
#else
#define CUTLASS_PRAGMA_UNROLL #pragma unroll
#define CUTLASS_PRAGMA_NO_UNROLL #pragma unroll 1
#endif
#define CUTLASS_GEMM_LOOP CUTLASS_PRAGMA_NO_UNROLL
#else
#define CUTLASS_PRAGMA_UNROLL
#define CUTLASS_PRAGMA_NO_UNROLL
#define CUTLASS_GEMM_LOOP
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
static const int NUM_THREADS_PER_WARP = 32;
static const int NUM_THREADS_PER_HALF_WARP = NUM_THREADS_PER_WARP / 2;
static const int NUM_THREADS_PER_QUAD = 4;
static const int NUM_THREADS_PER_QUAD_PAIR = NUM_THREADS_PER_QUAD * 2;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Helper function to return true when called by thread 0 of threadblock 0.
CUTLASS_HOST_DEVICE bool thread0() {
#if defined(__CUDA_ARCH__)
return (!threadIdx.x && !threadIdx.y && !threadIdx.z) && (!blockIdx.x && !blockIdx.y && !blockIdx.z);
#else
return false;
#endif
}
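/// Example (illustrative): emit a debug message exactly once per grid.
///
/// \code
/// if (cutlass::thread0()) {
///   printf("entered kernel\n");
/// }
/// \endcode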
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| 7,549 | C | 37.717949 | 105 | 0.594118 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/functional.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Define basic numeric operators
This is inspired by the Standard Library's <functional> header.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/half.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include <mma.h>
#endif // defined(CUTLASS_ARCH_WMMA_ENABLED)
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
struct absolute_value_op {
CUTLASS_HOST_DEVICE
T operator()(T lhs) const {
return abs(lhs);
}
};
template <typename T>
struct plus {
CUTLASS_HOST_DEVICE
T operator()(T lhs, T const &rhs) const {
lhs += rhs;
return lhs;
}
};
template <typename T>
struct minus {
CUTLASS_HOST_DEVICE
T operator()(T lhs, T const &rhs) const {
lhs -= rhs;
return lhs;
}
};
template <typename T>
struct multiplies {
CUTLASS_HOST_DEVICE
T operator()(T lhs, T const &rhs) const {
lhs *= rhs;
return lhs;
}
};
/// Squares with optional conversion
template <typename T, typename Output = T>
struct square {
CUTLASS_HOST_DEVICE
Output operator()(T lhs) const {
multiplies<Output> mul_op;
Output y = Output(lhs);
return mul_op(y, y);
}
};
/// Returns the magnitude squared of an element.
template <typename T, typename Output = T>
struct magnitude_squared {
CUTLASS_HOST_DEVICE
Output operator()(T lhs) const {
multiplies<Output> mul_op;
Output y = Output(lhs);
return mul_op(y, y);
}
};
/// Computes the square of a difference with optional conversion
template <typename T, typename Output = T>
struct square_difference {
CUTLASS_HOST_DEVICE
Output operator()(T lhs, T rhs) const {
multiplies<Output> mul_op;
Output y = Output(lhs) - Output(rhs);
return mul_op(y, y);
}
};
/// Computes the magnitude squared of a difference with optional conversion
template <typename T, typename Output = T>
struct magnitude_squared_difference {
CUTLASS_HOST_DEVICE
Output operator()(T lhs, T rhs) const {
multiplies<Output> mul_op;
Output y = Output(lhs) - Output(rhs);
return mul_op(y, y);
}
};
/// Divides
template <typename T>
struct divides {
CUTLASS_HOST_DEVICE
T operator()(T lhs, T const &rhs) const {
lhs /= rhs;
return lhs;
}
};
/// Negate
template <typename T>
struct negate {
CUTLASS_HOST_DEVICE
T operator()(T lhs) const {
return -lhs;
}
};
/// Greater equal
template <typename T>
struct greater_equal {
CUTLASS_HOST_DEVICE
bool operator()(T const &lhs, T const &rhs) const {
return (lhs >= rhs);
}
};
/// Greater
template <typename T>
struct greater {
CUTLASS_HOST_DEVICE
bool operator()(T const &lhs, T const &rhs) const {
return (lhs > rhs);
}
};
/// Less equal
template <typename T>
struct less_equal {
CUTLASS_HOST_DEVICE
bool operator()(T const &lhs, T const &rhs) const {
return (lhs <= rhs);
}
};
/// Less
template <typename T>
struct less {
CUTLASS_HOST_DEVICE
bool operator()(T const &lhs, T const &rhs) const {
return (lhs < rhs);
}
};
template <typename T>
struct maximum {
CUTLASS_HOST_DEVICE
T operator()(T const &lhs, T const &rhs) const {
return (lhs < rhs ? rhs : lhs);
}
};
template <>
struct maximum<float> {
CUTLASS_HOST_DEVICE
float operator()(float const &lhs, float const &rhs) const {
return fmaxf(lhs, rhs);
}
};
template <typename T>
struct minimum {
CUTLASS_HOST_DEVICE
T operator()(T const &lhs, T const &rhs) const {
return (rhs < lhs ? rhs : lhs);
}
};
template <>
struct minimum<float> {
CUTLASS_HOST_DEVICE
float operator()(float const &lhs, float const &rhs) const {
return fminf(lhs, rhs);
}
};
/// Fused multiply-add
template <typename A, typename B = A, typename C = A>
struct multiply_add {
CUTLASS_HOST_DEVICE
C operator()(A const &a, B const &b, C const &c) const {
return C(a) * C(b) + c;
}
};
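/// Example (illustrative sketch; 'a', 'b' and 'c' are assumed to be floats in scope):
///
/// \code
/// cutlass::multiply_add<float> mad;
/// float d = mad(a, b, c);   // d = a * b + c
/// \endcode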
/// Fused multiply-add
template <typename A, typename B = A, typename C = A>
struct multiply_add_relu0 {
CUTLASS_HOST_DEVICE
C operator()(A const &a, B const &b, C const &c) const {
maximum<C> mx;
return mx(C(a) * C(b) + c, C(0));
}
};
/// Fused bitwise AND and add
template <typename T>
struct and_add {
CUTLASS_HOST_DEVICE
T operator()(T const &a, T const &b, T const &c) const {
return ((a & b) + c);
}
};
/// Fused bitwise XOR and add
template <typename T>
struct xor_add {
CUTLASS_HOST_DEVICE
T operator()(T const &a, T const &b, T const &c) const {
return ((a ^ b) + c);
}
};
template <typename T>
struct conjugate {
CUTLASS_HOST_DEVICE
T operator()(T const &a) const {
return a;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
struct logical_and {
CUTLASS_HOST_DEVICE
T operator()(T const &a, T const &b) const {
return ((a && b) ? T(1) : T());
}
};
template <typename T>
struct logical_or {
CUTLASS_HOST_DEVICE
T operator()(T const &a, T const &b) const {
return ((a || b) ? T(1) : T());
}
};
template <typename T>
struct logical_not {
CUTLASS_HOST_DEVICE
T operator()(T const &a) const {
return T(!(a));
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
struct bit_and {
CUTLASS_HOST_DEVICE
T operator()(T const &a, T const &b) const {
return a & b;
}
};
template <typename T>
struct bit_or {
CUTLASS_HOST_DEVICE
T operator()(T const &a, T const &b) const {
return a | b;
}
};
template <typename T>
struct bit_not {
CUTLASS_HOST_DEVICE
T operator()(T const &a) const {
return ~a;
}
};
template <typename T>
struct bit_xor {
CUTLASS_HOST_DEVICE
T operator()(T const &a, T const &b) const {
return a ^ b;
}
};
//////////////////////////////////////////////////////////////////////////////////////////////////
/// Reduces value into the data pointed to by ptr
template <typename T>
struct red
{
CUTLASS_DEVICE
void operator()(T *ptr, const T &data)
{
atomicAdd(ptr, data);
}
};
/// Reduces value into the data pointed to by ptr (double specialization)
template<>
struct red<double>
{
CUTLASS_DEVICE
void operator()(double *ptr, const double &data)
{
#if !defined(__CUDA_ARCH__)
#elif (__CUDA_ARCH__ >= 600)
atomicAdd(ptr, data);
#else
// Use CAS loop
unsigned long long int* ptr_int = reinterpret_cast<unsigned long long int*>(ptr);
unsigned long long int old_int = *ptr_int;
unsigned long long int assumed_int;
do {
double update = data + __longlong_as_double(old_int);
assumed_int = old_int;
old_int = atomicCAS(ptr_int, assumed_int, __double_as_longlong(update));
} while (assumed_int != old_int);
#endif // (__CUDA_ARCH__ >= 600)
}
};
/// Reduces value into the data pointed to by ptr (half2 specialization)
template<>
struct red<half2>
{
CUTLASS_DEVICE
void operator()(half2 *ptr, const half2 &data)
{
#if !defined(__CUDA_ARCH__)
#elif (__CUDA_ARCH__ >= 600)
// Vector-2 atomic reduction requires .target sm_60 or higher
uint32_t word = reinterpret_cast<const uint32_t&>(data);
asm volatile ("red.gpu.global.add.noftz.f16x2 [%0], %1;\n" : : "l"(ptr), "r"(word));
#else
// Use CAS loop
uint32_t *ptr_int = reinterpret_cast<uint32_t *>(ptr);
uint32_t old_int = *ptr_int;
uint32_t assumed_int;
do
{
half2 old = reinterpret_cast<half2&>(old_int);
half hi = __hadd(__high2half(old), __high2half(data));
half lo = __hadd(__low2half(old), __low2half(data));
half2 update = __halves2half2(hi, lo);
uint32_t update_int = reinterpret_cast<const uint32_t&>(update);
assumed_int = old_int;
old_int = atomicCAS(ptr_int, assumed_int, update_int);
} while (assumed_int != old_int);
#endif // (__CUDA_ARCH__ >= 600)
}
};
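// Illustrative device-side sketch (names are hypothetical): accumulate per-thread partial sums
// into a global buffer with the atomic reduction appropriate for the element type.
//
//   __global__ void accumulate(float *global_accum, float partial) {
//     cutlass::red<float> reduce_op;
//     reduce_op(&global_accum[blockIdx.x], partial);   // plain atomicAdd for float
//   }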
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for nvcuda::wmma::fragment<Use, m, n, k, T, Layout>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
template<typename Use, int m, int n, int k, typename T, typename Layout>
struct plus<nvcuda::wmma::fragment<Use, m, n, k, T, Layout>>
{
using Fragment = nvcuda::wmma::fragment<Use, m, n, k, T, Layout>;
using ElementType = typename Fragment::element_type;
CUTLASS_HOST_DEVICE
Fragment operator()(Fragment const &lhs, Fragment const &rhs) const
{
Fragment result;
plus<ElementType> scalar_op;
ElementType *result_elts = reinterpret_cast<ElementType*>(&result);
const ElementType *lhs_elts = reinterpret_cast<const ElementType*>(&lhs);
const ElementType *rhs_elts = reinterpret_cast<const ElementType*>(&rhs);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Fragment::num_elements; i++) {
result_elts[i] = scalar_op(lhs_elts[i], rhs_elts[i]);
}
return result;
}
};
#endif // defined(CUTLASS_ARCH_WMMA_ENABLED)
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 11,132 | C | 23.36105 | 102 | 0.601958 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/barrier.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Implementation of a CTA-wide barrier for inter-CTA synchronization.
*/
#pragma once
#include "cutlass/cutlass.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// CTA-wide semaphore for inter-CTA synchronization.
struct Barrier
{
public:
/// Flag type
using T = int;
/// Initial flag value
static const T INIT = 0;
protected:
/// Load flag, as a strong operation (int specialization)
CUTLASS_DEVICE
static int ld_strong(int *ptr)
{
int state = 0;
#if (__CUDA_ARCH__ >= 700)
/// SM70 and newer use memory consistency qualifiers
asm volatile ("ld.global.relaxed.gpu.b32 %0, [%1];\n" : "=r"(state) : "l"(ptr));
#else
asm volatile ("ld.cg.global.b32 %0, [%1];\n" : "=r"(state) : "l"(ptr));
#endif // (__CUDA_ARCH__ >= 700)
return state;
}
/// Store flag, as a strong operation (int specialization)
CUTLASS_DEVICE
static void st_strong(int *ptr, int val)
{
#if (__CUDA_ARCH__ >= 700)
/// SM70 and newer use memory consistency qualifiers
asm volatile ("st.global.relaxed.gpu.b32 [%0], %1;\n" : : "l"(ptr), "r"(val));
#else
asm volatile ("st.cg.global.b32 [%0], %1;\n" : : "l"(ptr), "r"(val));
#endif // (__CUDA_ARCH__ >= 700)
}
/// Reduce into flag, with release pattern (int specialization)
CUTLASS_DEVICE
static void red_release(int *ptr, int val)
{
#if defined(__NVCC__) || (defined(__clang__) && defined(__CUDA__)) || defined(__CUDACC_RTC__)
#if (__CUDA_ARCH__ >= 700)
/// SM70 and newer use memory consistency qualifiers
asm volatile ("red.release.gpu.global.add.s32 [%0], %1;\n" : : "l"(ptr), "r"(val));
#else
__threadfence();
atomicAdd(ptr, val);
#endif // (__CUDA_ARCH__ >= 700)
#endif
}
public:
/// Uses thread[0] to wait for at least the specified count of signals on the given flag counter
CUTLASS_DEVICE
static void wait_lt(void *lock_ptr, int thread_idx, int flag_idx, int count)
{
#if defined(__NVCC__) || (defined(__clang__) && defined(__CUDA__)) || defined(__CUDACC_RTC__)
T *flag_ptr = reinterpret_cast<T*>(lock_ptr) + flag_idx;
if (thread_idx == 0)
{
// Spin-loop
#pragma unroll 1
while(ld_strong(flag_ptr) < count) {}
}
__syncthreads();
#endif
}
/// Uses thread[0] to wait for at least the specified count of signals on the given flag counter
CUTLASS_DEVICE
static void wait_eq(void *lock_ptr, int thread_idx, int flag_idx, T val = 1)
{
#if defined(__NVCC__) || (defined(__clang__) && defined(__CUDA__)) || defined(__CUDACC_RTC__)
T *flag_ptr = reinterpret_cast<T*>(lock_ptr) + flag_idx;
if (thread_idx == 0)
{
// Spin-loop
#pragma unroll 1
while(ld_strong(flag_ptr) != val) {}
}
__syncthreads();
#endif
}
/// Uses thread[0] to wait for the specified count of signals on the given flag counter
CUTLASS_DEVICE
static void wait_eq_reset(void *lock_ptr, int thread_idx, int flag_idx, T val = 1) {
#if defined(__NVCC__) || (defined(__clang__) && defined(__CUDA__)) || defined(__CUDACC_RTC__)
T *flag_ptr = reinterpret_cast<T*>(lock_ptr) + flag_idx;
if (thread_idx == 0)
{
// Spin-loop
#pragma unroll 1
while(atomicCAS(flag_ptr, val, 0) != val) {}
}
__syncthreads();
#endif
}
/// Increment the arrival count for a flag
CUTLASS_DEVICE
static void arrive_inc(void *lock_ptr, int thread_idx, int flag_idx)
{
#if defined(__NVCC__) || (defined(__clang__) && defined(__CUDA__)) || defined(__CUDACC_RTC__)
T* flag_ptr = reinterpret_cast<T*>(lock_ptr) + flag_idx;
__syncthreads();
if (thread_idx == 0) {
red_release(flag_ptr, 1);
}
#endif
}
/// Increment the arrival counts for a range of flags
CUTLASS_DEVICE
static void arrive_range_inc(void *lock_ptr, int thread_idx, int first_flag_idx, int count = 1)
{
#if defined(__NVCC__) || (defined(__clang__) && defined(__CUDA__)) || defined(__CUDACC_RTC__)
int flag_idx = first_flag_idx + thread_idx;
T* flag_ptr = reinterpret_cast<T*>(lock_ptr) + flag_idx;
// Barrier to make sure all other threads in block have written their data
__syncthreads();
// Select threads increment their flags
if (thread_idx < count) {
red_release(flag_ptr, 1);
}
#endif
}
};
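// Illustrative sketch (not part of the original header): inter-CTA handshake through a global
// counter workspace; 'counters' is a hypothetical int array initialized to Barrier::INIT, and
// 'flag_idx' / 'producer_count' are hypothetical values.
//
//   // Producer threadblock, after writing its partial results to global memory:
//   cutlass::Barrier::arrive_inc(counters, threadIdx.x, flag_idx);
//
//   // Consumer threadblock, before reading them (returns once at least
//   // 'producer_count' signals have arrived):
//   cutlass::Barrier::wait_lt(counters, threadIdx.x, flag_idx, producer_count);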
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 6,521 | C | 31.287129 | 100 | 0.590554 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/block_striped.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Utilities for performing block-striped access (load, store, reduce) of trivially-copyable,
statically-sized array types to global memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/wmma_array.h"
#include "cutlass/functional.h"
#include "cutlass/complex.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
// AccessWidth
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes the maximal power-of-two that evenly divides the size of T, capped at Limit
template <
typename T,
int Limit>
struct AccessWidth
{
// Inductive case
template <
int ObjectBytes, /// Size of T in bytes
int AlignBytes, /// Template induction variable
bool IsAligned = /// Whether ObjectBytes is an even multiple of AlignBytes
((AlignBytes <= Limit) && (ObjectBytes % AlignBytes == 0))>
struct Detail
{
static const int value = Detail<ObjectBytes, AlignBytes * 2>::value;
};
// Base case (ObjectBytes is not an even multiple of AlignBytes)
template <
int ObjectBytes, /// Size of T in bytes
int AlignBytes> /// Template induction variable
struct Detail<ObjectBytes, AlignBytes, false>
{
static const int value = AlignBytes / 2;
};
/// The maximal power-of-two that evenly divides the size of T
static const int value = Detail<
(int) sizeof(T),
1>::value;
};
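// Worked example (added for clarity): for a 24-byte type T, the powers of two that evenly divide
// 24 are 1, 2, 4 and 8, so AccessWidth<T, 16>::value == 8. A 64-byte type is capped at the
// 16-byte limit, giving 16.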
/////////////////////////////////////////////////////////////////////////////////////////////////
// StripedAccessType
/////////////////////////////////////////////////////////////////////////////////////////////////
/// ReinterpretCast type for striping a trivially-copyable type in global memory
/// (Default specialization. Striping granularity is type T.)
template <
typename T, /// Data type
int TransferBytes = /// Data access width (16 byte max for global memory access on current architectures)
AccessWidth<T, 16>::value>
struct alignas(TransferBytes) StripedAccessType : public T
{};
/// ReinterpretCast type for striping a trivially-copyable type in global memory
/// (Specialization for cutlass::Array<T>. Striping granularity is a multiple of T.)
template <
typename T, /// Array element type
int N, /// Number of elements in array
bool RegisterSized, /// T is register-sized
int TransferBytes> /// Data access width
struct StripedAccessType<
Array<T, N, RegisterSized>,
TransferBytes>
: public AlignedArray<
T, // Element type of StripedAccessType
__NV_STD_MAX(1, TransferBytes / (int) sizeof(T)), // Number of elements T in StripedAccessType
TransferBytes> // Alignment of StripedAccessType
{};
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
/// ReinterpretCast type for striping a trivially-copyable type in global memory
/// (Specialization for cutlass::WmmaFragmentArray<T>. Striping granularity is a multiple of T.)
template<
typename Use,
int m,
int n,
int k,
typename ElementT,
typename Layout,
int kFragments,
int TransferBytes>
struct StripedAccessType<
WmmaFragmentArray<nvcuda::wmma::fragment<Use, m, n, k, ElementT, Layout>, kFragments>,
TransferBytes>
: public AlignedArray<
ElementT,
__NV_STD_MAX(1, TransferBytes / (int) sizeof(ElementT)),
TransferBytes>
{};
#endif // if defined(CUTLASS_ARCH_WMMA_ENABLED)
/////////////////////////////////////////////////////////////////////////////////////////////////
// BlockStriped
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Utility for performing block-striped access (load, store) of trivially-copyable,
/// statically-sized array types to global memory
template <
int BlockThreads,
typename ArrayT,
typename AccessT = StripedAccessType<ArrayT> >
struct BlockStriped
{
/// Number of striped accesses
static const int kStripes = int(sizeof(ArrayT) / sizeof(AccessT));
static_assert(kStripes > 0, "AccessT type must be smaller than or equal to ArrayT type");
/// Load
CUTLASS_DEVICE
static void load(ArrayT &data, ArrayT *ptr, int thread_idx)
{
AccessT *access_input = reinterpret_cast<AccessT*>(ptr);
AccessT *access_data = reinterpret_cast<AccessT*>(&data);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kStripes; ++i) {
access_data[i] = access_input[(BlockThreads * i) + thread_idx];
}
}
/// Load & Add
CUTLASS_DEVICE
static void load_add(ArrayT &data, ArrayT *ptr, int thread_idx)
{
AccessT *access_input = reinterpret_cast<AccessT*>(ptr);
AccessT *access_data = reinterpret_cast<AccessT*>(&data);
plus<AccessT> add;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kStripes; ++i)
{
access_data[i] = add(access_data[i], access_input[(BlockThreads * i) + thread_idx]);
}
}
/// Store
CUTLASS_DEVICE
static void store(ArrayT *ptr, const ArrayT &data, int thread_idx)
{
AccessT *access_output = reinterpret_cast<AccessT*>(ptr);
const AccessT *access_data = reinterpret_cast<const AccessT*>(&data);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kStripes; ++i) {
access_output[(BlockThreads * i) + thread_idx] = access_data[i];
}
}
};
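// Illustrative device-side sketch (names are hypothetical): 128 threads cooperatively spill and
// reload one fragment per thread through a global workspace sized for 128 fragments.
//
//   using Fragment = cutlass::Array<float, 64>;
//   using Striped  = cutlass::BlockStriped<128, Fragment>;
//   Striped::store(workspace, frag, threadIdx.x);   // strided, vectorized stores
//   __syncthreads();
//   Striped::load(frag, workspace, threadIdx.x);    // matching strided loads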
/////////////////////////////////////////////////////////////////////////////////////////////////
// BlockStripedReduce
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Utility for performing block-striped access (load, store, reduce) of trivially-copyable,
/// statically-sized array types to global memory.
/// (Default specialization)
template <
int BlockThreads,
typename ArrayT,
typename ElementT = typename StripedAccessType<ArrayT>::Element>
struct BlockStripedReduce :
BlockStriped<
BlockThreads,
ArrayT,
ElementT>
{
/// Reduce
CUTLASS_DEVICE
static void reduce(ArrayT *ptr, const ArrayT &data, int thread_idx)
{
cutlass::red<ElementT> reduce;
ElementT *access_output = reinterpret_cast<ElementT*>(ptr);
const ElementT *access_data = reinterpret_cast<const ElementT*>(&data);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < BlockStripedReduce::kStripes; ++i) {
reduce(access_output + (BlockThreads * i) + thread_idx, access_data[i]);
}
}
};
/// Utility for performing block-striped access (load, store, reduce) of trivially-copyable,
/// statically-sized array types to global memory.
/// (Specialization for half_t. Uses half2 vectorized-reduction.)
template <
int BlockThreads,
typename ArrayT>
struct BlockStripedReduce<BlockThreads, ArrayT, half_t> :
BlockStriped<
BlockThreads,
ArrayT,
half2>
{
  static_assert(BlockStripedReduce::kStripes % 2 == 0, "Array of half_t must have an even number of elements");
/// Reduce
CUTLASS_DEVICE
static void reduce(ArrayT *ptr, const ArrayT &data, int thread_idx)
{
cutlass::red<half2> reduce;
half2 *access_output = reinterpret_cast<half2*>(ptr);
const half2 *access_data = reinterpret_cast<const half2*>(&data);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < BlockStripedReduce::kStripes; ++i)
{
reduce(access_output + (BlockThreads * i) + thread_idx, access_data[i]);
}
}
};
} // namespace cutlass
| 9,372 | C | 33.97388 | 111 | 0.616411 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/float8.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
  \brief Defines classes for using 8-bit floating-point types (E4M3 and E5M2) in host or
  device code.
*/
#pragma once
#include <cuda_fp16.h>
#include "cutlass/cutlass.h"
#if defined(__CUDACC_RTC__)
#include "cutlass/floating_point_nvrtc.h"
#else
//
// Standard Library headers belong here to avoid conflicts with NVRTC.
//
#include <cmath>
#include <limits>
#include <cstdint>
#include <cstring>
#endif
///////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900)
#if (__CUDACC_VER_MAJOR__ >= 12) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 8))
#ifndef CUDA_PTX_FP8_CVT_ENABLED
#define CUDA_PTX_FP8_CVT_ENABLED 1
#endif
#endif
#endif
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// FP8 has two possible encodings: E4M3 and E5M2
//
// E4M3 : 7 | 6 5 4 3 | 2 1 0
// E5M2 : 7 | 6 5 4 3 2 | 1 0
//
///////////////////////////////////////////////////////////////////////////////////////////////////
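//
// Worked bit patterns (added for clarity):
//
// E4M3 : 1.0f -> 0x38 (exponent field == bias 7, mantissa 0); largest finite value 0x7e == 448.0f
// E5M2 : 1.0f -> 0x3c (exponent field == bias 15, mantissa 0); largest finite value 0x7b == 57344.0f
//
// These match the FP8_MAX_FLT constants defined below.
//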
enum class FloatEncoding {
E4M3,
E5M2
};
template<FloatEncoding T>
struct alignas(1) float8_base {
static constexpr bool IS_E4M3 = (T == FloatEncoding::E4M3);
static constexpr bool IS_E5M2 = (T == FloatEncoding::E5M2);
// Number of Bits representing mantissa and exponents
static constexpr int FP32_NUM_BITS = 32;
static constexpr int FP32_NUM_EXPONENT_BITS = 8;
static constexpr int FP32_NUM_MANTISSA_BITS = 23;
static constexpr uint32_t FP32_NAN = 0x7fffffff;
static constexpr uint32_t FP32_INFINITY_MASK = 0x7f800000;
static constexpr int FP32_MAX_EXPONENT = 127;
static constexpr int FP32_MIN_EXPONENT = -126;
static constexpr int FP32_EXPONENT_BIAS = 127;
static constexpr int FP16_NUM_BITS = 16;
static constexpr int FP16_NUM_EXPONENT_BITS = 5;
static constexpr int FP16_NUM_MANTISSA_BITS = 10;
static constexpr uint16_t FP16_NAN = 0x7fff;
static constexpr uint16_t FP16_INFINITY_MASK = 0x7c00;
static constexpr int FP16_MAX_EXPONENT = 15;
static constexpr int FP16_MIN_EXPONENT = -14;
static constexpr int FP16_EXPONENT_BIAS = 15;
static constexpr int FP8_NUM_BITS = 8;
static constexpr int FP8_NUM_EXPONENT_BITS = IS_E4M3 ? 4 : 5;
static constexpr int FP8_NUM_MANTISSA_BITS = IS_E4M3 ? 3 : 2;
static constexpr uint8_t FP8_NAN = 0x7f; // Also F8_INF
static constexpr uint8_t FP8_INFINITY_MASK = IS_E4M3 ? 0x78 : 0x7c;
static constexpr int FP8_MAX_EXPONENT = IS_E4M3 ? 7 : 15;
static constexpr int FP8_MIN_EXPONENT = IS_E4M3 ? -6 : -14;
static constexpr int FP8_EXPONENT_BIAS = IS_E4M3 ? 7 : 15;
static constexpr uint8_t FP8_EXPONENT_MASK = (1 << FP8_NUM_EXPONENT_BITS) - 1;
static constexpr uint8_t FP8_MANTISSA_MASK = (1 << FP8_NUM_MANTISSA_BITS) - 1;
static constexpr uint8_t FP8_MAX_FLT = (IS_E4M3 ? 0x7e : 0x7b);
// 256 in float
static constexpr uint32_t FP8_SAT_VAL_FP32 = 0x43800000;
//
// Data members
//
/// Data container
uint8_t storage;
/// Ctors.
CUTLASS_HOST_DEVICE
float8_base() : storage(0) { }
/// Is finite implementation
CUTLASS_HOST_DEVICE
static bool isfinite(float flt) {
uint32_t s;
#if defined(__CUDA_ARCH__)
s = reinterpret_cast<uint32_t const &>(flt);
#else
std::memcpy(&s, &flt, sizeof(s));
#endif
return (s & 0x7f800000) < 0x7f800000;
}
/// Is NaN implementation
CUTLASS_HOST_DEVICE
static bool isnan(float flt) {
uint32_t s;
#if defined(__CUDA_ARCH__)
s = reinterpret_cast<uint32_t const &>(flt);
#else
std::memcpy(&s, &flt, sizeof(s));
#endif
return (s & 0x7fffffff) > 0x7f800000;
}
/// Is infinite implementation
CUTLASS_HOST_DEVICE
static bool isinf(float flt) {
uint32_t s;
#if defined(__CUDA_ARCH__)
s = reinterpret_cast<uint32_t const &>(flt);
#else
std::memcpy(&s, &flt, sizeof(s));
#endif
// Sign = 0 for +inf, 1 for -inf
// Exponent = all ones
// Mantissa = all zeros
return (s == 0x7f800000) || (s == 0xff800000);
}
/// FP32 -> FP8 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static uint8_t convert_float_to_fp8(float const& flt) {
// software implementation rounds toward nearest even
uint32_t s;
#if defined(__CUDA_ARCH__)
s = reinterpret_cast<uint32_t const &>(flt);
#else
std::memcpy(&s, &flt, sizeof(s));
#endif
// Extract the bits in the FP32 type
uint8_t sign = uint8_t((s >> 24 & 0x80));
int8_t exp = uint8_t(((s >> FP32_NUM_MANTISSA_BITS) & 0xff) - FP32_EXPONENT_BIAS);
int mantissa = s & 0x7fffff;
uint8_t u = 0;
uint8_t const kF8_NaN = 0x7f;
// NaN => NaN
if (isnan(flt)) {
return kF8_NaN;
}
// Inf => MAX_FLT (satfinite)
if (isinf(flt)) {
return sign | FP8_MAX_FLT;
}
// Special handling
if ( exp == -128 ) {
// int8 range is from -128 to 127
// So 255(inf) - 127(bias) = 128 - will show up as -128
// satfinite
return (sign | FP8_MAX_FLT);
}
int sticky_bit = 0;
bool skip_sign = false;
bool may_be_nan = false;
if ( (exp >= FP8_MIN_EXPONENT) && (exp <= FP8_MAX_EXPONENT) ) {
// normal fp32 to normal fp8
exp = uint8_t(exp + uint8_t(FP8_EXPONENT_BIAS));
u = uint8_t(((exp & FP8_EXPONENT_MASK) << FP8_NUM_MANTISSA_BITS));
u = uint8_t(u | (mantissa >> (FP32_NUM_MANTISSA_BITS - FP8_NUM_MANTISSA_BITS)));
} else if(exp < FP8_MIN_EXPONENT) {
// normal single-precision to subnormal float8-precision representation
int rshift = (FP8_MIN_EXPONENT - exp);
if (rshift < FP32_NUM_BITS) {
mantissa |= (1 << FP32_NUM_MANTISSA_BITS);
sticky_bit = ((mantissa & ((1 << rshift) - 1)) != 0);
mantissa = (mantissa >> rshift);
u = (uint8_t(mantissa >> (FP32_NUM_MANTISSA_BITS- FP8_NUM_MANTISSA_BITS)) & FP8_MANTISSA_MASK);
} else {
mantissa = 0;
u = 0;
}
// Exponent > FP8_MAX_EXPONENT - this is a special case done to match HW
// 0x4380_0000 to 0x43e0_0000 - maps from 256 to 448, and does not saturate / inf.
} else {
if( exp == (FP8_MAX_EXPONENT + 1) ) {
uint8_t mantissa_tmp = uint8_t(mantissa >> (FP32_NUM_MANTISSA_BITS - FP8_NUM_MANTISSA_BITS));
if( mantissa_tmp < FP8_MANTISSA_MASK) {
exp = uint8_t(exp + uint8_t(FP8_EXPONENT_BIAS));
u = uint8_t(exp << FP8_NUM_MANTISSA_BITS) | mantissa_tmp;
may_be_nan = (mantissa_tmp == (FP8_MANTISSA_MASK-1));
} else {
// satfinite
return (sign | FP8_MAX_FLT);
}
} else{
// satfinite
return (sign | FP8_MAX_FLT);
}
}
// round to nearest even
int NUM_BITS_SHIFT = FP32_NUM_MANTISSA_BITS - (FP8_NUM_MANTISSA_BITS + 1);
int round_bit = ((mantissa >> NUM_BITS_SHIFT) & 1);
sticky_bit |= ((mantissa & ((1 << NUM_BITS_SHIFT) - 1)) != 0);
if ((round_bit && sticky_bit) || (round_bit && (u & 1))) {
u = uint8_t(u + 1);
if( may_be_nan ) {
skip_sign = true;
}
}
if (u > FP8_MAX_FLT) {
// satfinite
u = (sign | FP8_MAX_FLT);
}
if( ! skip_sign ) {
u |= sign;
}
return u;
}
/// Converts a fp8 value stored as a uint8_t to a float
CUTLASS_HOST_DEVICE
static float convert_fp8_to_float(uint8_t const& x) {
uint32_t constexpr kF32_NaN = 0x7fffffff;
uint8_t const &f8 = x;
int sign = (f8 >> (FP8_NUM_BITS - 1)) & 1;
int exp = (f8 >> FP8_NUM_MANTISSA_BITS) & FP8_EXPONENT_MASK;
int mantissa = f8 & FP8_MANTISSA_MASK;
unsigned f = (sign << (FP32_NUM_BITS-1));
if (IS_E4M3 && exp == 15 && mantissa == 0x7) {
f = kF32_NaN;
}
else if (exp > 0 && (IS_E4M3 || exp < (FP8_MAX_EXPONENT + FP8_EXPONENT_BIAS + 1))) {
// normal
exp += (FP32_EXPONENT_BIAS - FP8_EXPONENT_BIAS);
f = f |
(exp << FP32_NUM_MANTISSA_BITS) |
(mantissa << (FP32_NUM_MANTISSA_BITS-FP8_NUM_MANTISSA_BITS));
} else if (exp == 0) {
if (mantissa) {
// subnormal
exp += (FP32_EXPONENT_BIAS - FP8_EXPONENT_BIAS) + 1;
while ((mantissa & (1 << FP8_NUM_MANTISSA_BITS)) == 0) {
mantissa <<= 1;
exp--;
}
mantissa &= FP8_MANTISSA_MASK;
f = f |
(exp << FP32_NUM_MANTISSA_BITS) |
(mantissa << (FP32_NUM_MANTISSA_BITS-FP8_NUM_MANTISSA_BITS));
} else {
// sign-preserving zero
}
} else {
if(mantissa == 0){
// Sign-preserving infinity
f = (f | 0x7f800000);
} else {
// Canonical NaN
f = kF32_NaN;
}
}
#if defined(__CUDA_ARCH__)
return reinterpret_cast<float const&>(f);
#else
float flt;
std::memcpy(&flt, &f, sizeof(flt));
return flt;
#endif
}
};
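// Decoding examples (illustrative) for convert_fp8_to_float above:
//   E4M3 0x01 -> subnormal path: the mantissa is renormalized, giving 2^-9 (~0.00195f)
//   E4M3 0x7f -> the E4M3 NaN encoding, mapped to a canonical FP32 NaN
//   E5M2 0x7c -> exponent all ones, mantissa zero, decoded as +inf (E5M2 keeps infinities)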
// Forward declaration of float_e5m2_t to define float_e4m3_t <=> float_e5m2_t
// conversions in class float_e4m3_t
struct float_e5m2_t;
///////////////////////////////////////////////////////////////
///
/// floating-point 8 type : E4M3
///
///////////////////////////////////////////////////////////////
struct alignas(1) float_e4m3_t : float8_base<FloatEncoding::E4M3> {
using Base = float8_base<FloatEncoding::E4M3>;
static constexpr int MAX_EXPONENT = Base::FP8_MAX_EXPONENT;
//
// Static conversion operators
//
/// Constructs from an uint8_t
CUTLASS_HOST_DEVICE
static float_e4m3_t bitcast(uint8_t x) {
float_e4m3_t f;
f.storage = x;
return f;
}
/// FP32 -> FP8 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static float_e4m3_t from_float(float const& flt) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t tmp;
float y = float();
asm volatile("cvt.rn.satfinite.e4m3x2.f32 %0, %1, %2;" : "=h"(tmp) : "f"(y), "f"(flt));
return *reinterpret_cast<float_e4m3_t *>(&tmp);
#else
return bitcast(Base::convert_float_to_fp8(flt));
#endif
}
/// FP16 -> E4M3 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static float_e4m3_t from_half(half const& flt) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t tmp = 0;
uint32_t bits = reinterpret_cast<uint16_t const &>(flt);
asm volatile("cvt.rn.satfinite.e4m3x2.f16x2 %0, %1;" : "=h"(tmp) : "r"(bits));
return *reinterpret_cast<float_e4m3_t *>(&tmp);
#else
return bitcast(Base::convert_float_to_fp8(float(flt)));
#endif
}
// E4M3 -> half
CUTLASS_HOST_DEVICE
static half to_half(float_e4m3_t const& x) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t bits = x.storage;
uint32_t packed;
asm volatile("cvt.rn.f16x2.e4m3x2 %0, %1;\n" : "=r"(packed) : "h"(bits));
return reinterpret_cast<half2 const &>(packed).x;
#else
return half(Base::convert_fp8_to_float(x.storage));
#endif
}
// E4M3 -> Float
CUTLASS_HOST_DEVICE
static float to_float(float_e4m3_t const& x) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t bits = x.storage;
uint32_t packed;
asm volatile("cvt.rn.f16x2.e4m3x2 %0, %1;\n" : "=r"(packed) : "h"(bits));
return float(reinterpret_cast<half2 const &>(packed).x);
#else
return Base::convert_fp8_to_float(x.storage);
#endif
}
//
// Methods
//
/// Default constructor
CUTLASS_HOST_DEVICE
float_e4m3_t() : Base() { }
/// Copy constructor
CUTLASS_HOST_DEVICE
float_e4m3_t(float_e4m3_t const& x) {
#if defined(__CUDA_ARCH__)
storage = reinterpret_cast<uint8_t const &>(x);
#else
uint8_t raw = x.storage;
std::memcpy(&storage, &raw, sizeof(storage));
#endif
}
/// Floating point conversion
CUTLASS_HOST_DEVICE
explicit float_e4m3_t(float x) {
storage = from_float(x).storage;
}
CUTLASS_HOST_DEVICE
explicit float_e4m3_t(half x) {
storage = from_half(x).storage;
}
/// Floating point conversion
CUTLASS_HOST_DEVICE
explicit float_e4m3_t(double x): float_e4m3_t(float(x)) {
}
/// Integer conversion
CUTLASS_HOST_DEVICE
explicit float_e4m3_t(int x): float_e4m3_t(float(x)) {
}
/// E5M2 conversion. Defined after float_e5m2_t is defined.
CUTLASS_HOST_DEVICE
explicit float_e4m3_t(float_e5m2_t x);
/// Assignment
CUTLASS_HOST_DEVICE
float_e4m3_t & operator=(float_e4m3_t const &x) {
#if defined(__CUDA_ARCH__)
storage = reinterpret_cast<uint8_t const &>(x);
#else
uint8_t raw = x.storage;
std::memcpy(&storage, &raw, sizeof(storage));
#endif
return *this;
}
/// Converts to float
CUTLASS_HOST_DEVICE
operator float() const {
return to_float(*this);
}
/// Converts to half
CUTLASS_HOST_DEVICE
operator half() const {
return to_half(*this);
}
/// Converts to double
CUTLASS_HOST_DEVICE
explicit operator double() const {
return double(to_float(*this));
}
/// Converts to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
#if defined(__CUDA_ARCH__)
return __half2int_rn(to_half(*this));
#else
return int(to_float(*this));
#endif
}
/// Casts to bool
CUTLASS_HOST_DEVICE
explicit operator bool() const {
#if defined(__CUDA_ARCH__)
return bool(__half2int_rn(to_half(*this)));
#else
return bool(int(to_float(*this)));
#endif
}
/// Accesses raw internal state
CUTLASS_HOST_DEVICE
uint8_t& raw() {
return storage;
}
/// Accesses raw internal state
CUTLASS_HOST_DEVICE
uint8_t raw() const {
return storage;
}
/// Returns the sign bit
CUTLASS_HOST_DEVICE
bool signbit() const {
return ((storage & (1 << (Base::FP8_NUM_BITS - 1))) != 0);
}
/// Returns the biased exponent
CUTLASS_HOST_DEVICE
int exponent_biased() const {
return int((storage >> FP8_NUM_MANTISSA_BITS) & Base::FP8_EXPONENT_MASK);
}
/// Returns the unbiased exponent
CUTLASS_HOST_DEVICE
int exponent() const {
return exponent_biased() - Base::FP8_EXPONENT_BIAS;
}
/// Returns the mantissa
CUTLASS_HOST_DEVICE
int mantissa() const {
return int(storage & Base::FP8_MANTISSA_MASK);
}
};
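// Usage sketch for float_e4m3_t (illustrative):
//
//   cutlass::float_e4m3_t a(1.5f);                     // explicit FP32 -> E4M3, round-to-nearest-even + satfinite
//   float f = float(a);                                // implicit, exact widening back to FP32
//   auto one = cutlass::float_e4m3_t::bitcast(0x38);   // 1.0 reinterpreted from raw bits
//   bool s = a.signbit();                              // false
//
// E4M3 has no infinities: out-of-range inputs saturate to the maximum finite
// magnitude (448), and 0x7f/0xff are the only NaN encodings.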
///////////////////////////////////////////////////////////////
///
/// floating-point 8 type : E5M2
///
///////////////////////////////////////////////////////////////
struct alignas(1) float_e5m2_t : float8_base<FloatEncoding::E5M2> {
using Base = float8_base<FloatEncoding::E5M2>;
static constexpr int MAX_EXPONENT = Base::FP8_MAX_EXPONENT;
//
// Static conversion operators
//
/// Constructs from an uint8_t
CUTLASS_HOST_DEVICE
static float_e5m2_t bitcast(uint8_t x) {
float_e5m2_t f;
f.storage = x;
return f;
}
/// FP32 -> FP8 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static float_e5m2_t from_float(float const& flt) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t tmp;
float y = float();
asm volatile("cvt.rn.satfinite.e5m2x2.f32 %0, %1, %2;" : "=h"(tmp) : "f"(y), "f"(flt));
return *reinterpret_cast<float_e5m2_t *>(&tmp);
#else
return bitcast(Base::convert_float_to_fp8(flt));
#endif
}
/// FP16 -> E5M2 conversion - rounds to nearest even
CUTLASS_HOST_DEVICE
static float_e5m2_t from_half(half const& flt) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t tmp = 0;
uint32_t bits = reinterpret_cast<uint16_t const &>(flt);
asm volatile("cvt.rn.satfinite.e5m2x2.f16x2 %0, %1;" : "=h"(tmp) : "r"(bits));
return *reinterpret_cast<float_e5m2_t *>(&tmp);
#else
return bitcast(Base::convert_float_to_fp8(float(flt)));
#endif
}
// E5M2 -> half
CUTLASS_HOST_DEVICE
static half to_half(float_e5m2_t const& x) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t bits = x.storage;
uint32_t packed;
asm volatile("cvt.rn.f16x2.e5m2x2 %0, %1;\n" : "=r"(packed) : "h"(bits));
return reinterpret_cast<half2 const &>(packed).x;
#else
return half(Base::convert_fp8_to_float(x.storage));
#endif
}
// E5M2 -> Float
CUTLASS_HOST_DEVICE
static float to_float(float_e5m2_t const& x) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint16_t bits = x.storage;
uint32_t packed;
asm volatile("cvt.rn.f16x2.e5m2x2 %0, %1;\n" : "=r"(packed) : "h"(bits));
return float(reinterpret_cast<half2 const &>(packed).x);
#else
return Base::convert_fp8_to_float(x.storage);
#endif
}
//
// Methods
//
/// Default constructor
CUTLASS_HOST_DEVICE
float_e5m2_t() : Base() { }
/// Copy constructor
CUTLASS_HOST_DEVICE
float_e5m2_t(float_e5m2_t const& x) {
#if defined(__CUDA_ARCH__)
storage = reinterpret_cast<uint8_t const &>(x);
#else
uint8_t raw = x.storage;
std::memcpy(&storage, &raw, sizeof(storage));
#endif
}
/// Floating point conversion
CUTLASS_HOST_DEVICE
explicit float_e5m2_t(float x) {
storage = from_float(x).storage;
}
CUTLASS_HOST_DEVICE
explicit float_e5m2_t(half x) {
storage = from_half(x).storage;
}
/// Floating point conversion
CUTLASS_HOST_DEVICE
explicit float_e5m2_t(double x): float_e5m2_t(float(x)) {
}
/// Integer conversion
CUTLASS_HOST_DEVICE
explicit float_e5m2_t(int x): float_e5m2_t(float(x)) {
}
/// E4M3 conversion
CUTLASS_HOST_DEVICE
explicit float_e5m2_t(float_e4m3_t x);
/// Assignment
CUTLASS_HOST_DEVICE
float_e5m2_t & operator=(float_e5m2_t const &x) {
#if defined(__CUDA_ARCH__)
storage = reinterpret_cast<uint8_t const &>(x);
#else
uint8_t raw = x.storage;
std::memcpy(&storage, &raw, sizeof(storage));
#endif
return *this;
}
/// Converts to float
CUTLASS_HOST_DEVICE
operator float() const {
return to_float(*this);
}
/// Converts to half
CUTLASS_HOST_DEVICE
operator half() const {
return to_half(*this);
}
/// Converts to double
CUTLASS_HOST_DEVICE
explicit operator double() const {
return double(to_float(*this));
}
/// Converts to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
#if defined(__CUDA_ARCH__)
return __half2int_rn(to_half(*this));
#else
return int(to_float(*this));
#endif
}
/// Casts to bool
CUTLASS_HOST_DEVICE
explicit operator bool() const {
#if defined(__CUDA_ARCH__)
return bool(__half2int_rn(to_half(*this)));
#else
return bool(int(to_float(*this)));
#endif
}
/// Accesses raw internal state
CUTLASS_HOST_DEVICE
uint8_t& raw() {
return storage;
}
/// Accesses raw internal state
CUTLASS_HOST_DEVICE
uint8_t raw() const {
return storage;
}
/// Returns the sign bit
CUTLASS_HOST_DEVICE
bool signbit() const {
return ((storage & (1 << (Base::FP8_NUM_BITS - 1))) != 0);
}
/// Returns the biased exponent
CUTLASS_HOST_DEVICE
int exponent_biased() const {
return int((storage >> FP8_NUM_MANTISSA_BITS) & Base::FP8_EXPONENT_MASK);
}
/// Returns the unbiased exponent
CUTLASS_HOST_DEVICE
int exponent() const {
return exponent_biased() - Base::FP8_EXPONENT_BIAS;
}
/// Returns the mantissa
CUTLASS_HOST_DEVICE
int mantissa() const {
return int(storage & Base::FP8_MANTISSA_MASK);
}
};
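// Usage sketch for float_e5m2_t (illustrative):
//
//   cutlass::float_e5m2_t a(65536.0f);                // above 57344 (max finite) -> saturates to 0x7b
//   auto inf = cutlass::float_e5m2_t::bitcast(0x7c);  // +inf is representable in E5M2
//   float f = float(inf);                             // +inf
//
// Unlike E4M3, E5M2 keeps +/-inf; exponent-all-ones with a nonzero mantissa decodes to NaN.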
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Arithmetic operators
//
///////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
bool operator==(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float(lhs) == float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator!=(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float(lhs) != float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator<(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float(lhs) < float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator<=(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float(lhs) <= float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator>(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float(lhs) > float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator>=(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float(lhs) >= float(rhs);
}
CUTLASS_HOST_DEVICE
float_e4m3_t operator+(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float_e4m3_t(float(lhs) + float(rhs));
}
CUTLASS_HOST_DEVICE
float_e4m3_t operator-(float_e4m3_t const& lhs) {
return float_e4m3_t(-float(lhs));
}
CUTLASS_HOST_DEVICE
float_e4m3_t operator-(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float_e4m3_t(float(lhs) - float(rhs));
}
CUTLASS_HOST_DEVICE
float_e4m3_t operator*(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float_e4m3_t(float(lhs) * float(rhs));
}
CUTLASS_HOST_DEVICE
float_e4m3_t operator/(float_e4m3_t const& lhs, float_e4m3_t const& rhs) {
return float_e4m3_t(float(lhs) / float(rhs));
}
CUTLASS_HOST_DEVICE
float_e4m3_t& operator+=(float_e4m3_t & lhs, float_e4m3_t const& rhs) {
lhs = float_e4m3_t(float(lhs) + float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e4m3_t& operator-=(float_e4m3_t & lhs, float_e4m3_t const& rhs) {
lhs = float_e4m3_t(float(lhs) - float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e4m3_t& operator*=(float_e4m3_t & lhs, float_e4m3_t const& rhs) {
lhs = float_e4m3_t(float(lhs) * float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e4m3_t& operator/=(float_e4m3_t & lhs, float_e4m3_t const& rhs) {
lhs = float_e4m3_t(float(lhs) / float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e4m3_t& operator++(float_e4m3_t & lhs) {
float tmp(lhs);
++tmp;
lhs = float_e4m3_t(tmp);
return lhs;
}
CUTLASS_HOST_DEVICE
float_e4m3_t& operator--(float_e4m3_t & lhs) {
float tmp(lhs);
--tmp;
lhs = float_e4m3_t(tmp);
return lhs;
}
CUTLASS_HOST_DEVICE
float_e4m3_t operator++(float_e4m3_t & lhs, int) {
float_e4m3_t ret(lhs);
float tmp(lhs);
tmp++;
lhs = float_e4m3_t(tmp);
return ret;
}
CUTLASS_HOST_DEVICE
float_e4m3_t operator--(float_e4m3_t & lhs, int) {
float_e4m3_t ret(lhs);
float tmp(lhs);
tmp--;
lhs = float_e4m3_t(tmp);
return ret;
}
CUTLASS_HOST_DEVICE
bool operator==(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float(lhs) == float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator!=(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float(lhs) != float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator<(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float(lhs) < float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator<=(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float(lhs) <= float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator>(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float(lhs) > float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator>=(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float(lhs) >= float(rhs);
}
CUTLASS_HOST_DEVICE
float_e5m2_t operator+(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float_e5m2_t(float(lhs) + float(rhs));
}
CUTLASS_HOST_DEVICE
float_e5m2_t operator-(float_e5m2_t const& lhs) {
return float_e5m2_t(-float(lhs));
}
CUTLASS_HOST_DEVICE
float_e5m2_t operator-(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float_e5m2_t(float(lhs) - float(rhs));
}
CUTLASS_HOST_DEVICE
float_e5m2_t operator*(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float_e5m2_t(float(lhs) * float(rhs));
}
CUTLASS_HOST_DEVICE
float_e5m2_t operator/(float_e5m2_t const& lhs, float_e5m2_t const& rhs) {
return float_e5m2_t(float(lhs) / float(rhs));
}
CUTLASS_HOST_DEVICE
float_e5m2_t& operator+=(float_e5m2_t & lhs, float_e5m2_t const& rhs) {
lhs = float_e5m2_t(float(lhs) + float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e5m2_t& operator-=(float_e5m2_t & lhs, float_e5m2_t const& rhs) {
lhs = float_e5m2_t(float(lhs) - float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e5m2_t& operator*=(float_e5m2_t & lhs, float_e5m2_t const& rhs) {
lhs = float_e5m2_t(float(lhs) * float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e5m2_t& operator/=(float_e5m2_t & lhs, float_e5m2_t const& rhs) {
lhs = float_e5m2_t(float(lhs) / float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
float_e5m2_t& operator++(float_e5m2_t & lhs) {
float tmp(lhs);
++tmp;
lhs = float_e5m2_t(tmp);
return lhs;
}
CUTLASS_HOST_DEVICE
float_e5m2_t& operator--(float_e5m2_t & lhs) {
float tmp(lhs);
--tmp;
lhs = float_e5m2_t(tmp);
return lhs;
}
CUTLASS_HOST_DEVICE
float_e5m2_t operator++(float_e5m2_t & lhs, int) {
float_e5m2_t ret(lhs);
float tmp(lhs);
tmp++;
lhs = float_e5m2_t(tmp);
return ret;
}
CUTLASS_HOST_DEVICE
float_e5m2_t operator--(float_e5m2_t & lhs, int) {
float_e5m2_t ret(lhs);
float tmp(lhs);
tmp--;
lhs = float_e5m2_t(tmp);
return ret;
}
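// Note on the operators above (illustrative): comparisons and arithmetic convert
// their operands to FP32, compute in FP32, and (for arithmetic) narrow the result
// back with round-to-nearest-even + satfinite, so intermediate precision can be lost:
//
//   cutlass::float_e4m3_t a(3.0f), b(0.0625f);
//   cutlass::float_e4m3_t c = a + b;   // float(a) + float(b) = 3.0625 is not representable
//                                      // in E4M3, so c rounds back to 3.0f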
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// float_e4m3_t <=> float_e5m2_t conversions
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// float_e4m3_t <= float_e5m2_t
CUTLASS_HOST_DEVICE
float_e4m3_t::float_e4m3_t(float_e5m2_t x) {
storage = from_float(float_e5m2_t::to_float(x)).storage;
}
/// float_e5m2_t <= float_e4m3_t
CUTLASS_HOST_DEVICE
float_e5m2_t::float_e5m2_t(float_e4m3_t x) {
storage = from_float(float_e4m3_t::to_float(x)).storage;
}
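// Cross-format conversion sketch (illustrative): both constructors above decode the
// source to FP32 and re-encode, so the value can round again:
//
//   cutlass::float_e5m2_t y(cutlass::float_e4m3_t(1.75f));    // 1.75 fits both formats exactly
//   cutlass::float_e5m2_t z(cutlass::float_e4m3_t(1.875f));   // 1.875 needs 3 mantissa bits;
//                                                             // E5M2 rounds it to 2.0 (ties to even)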
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Standard Library operations and definitions
//
///////////////////////////////////////////////////////////////////////////////////////////////////
#if !defined(__CUDACC_RTC__)
namespace std {
/// Numeric limits common to all float8 types
template <typename T>
struct float8_base_numeric_limits {
private:
using F8Type = T;
public:
static bool const is_specialized = true;
static bool const is_signed = true;
static bool const is_integer = false;
static bool const is_exact = false;
static bool const has_quiet_NaN = true;
static bool const has_signaling_NaN = false;
static std::float_denorm_style const has_denorm = std::denorm_present;
static bool const has_denorm_loss = true;
static std::float_round_style const round_style = std::round_to_nearest;
static bool const is_iec559 = false;
static bool const is_bounded = true;
static bool const is_modulo = false;
static int const digits = F8Type::FP8_NUM_MANTISSA_BITS;
/// Least positive value
static F8Type min() { return F8Type::bitcast(0x01); }
/// Maximum finite value
static F8Type max() { return F8Type::bitcast(F8Type::FP8_MAX_FLT); }
/// Returns maximum rounding error
static F8Type round_error() { return F8Type(0.5f); }
/// Returns positive infinity value
static F8Type infinity() { return F8Type::bitcast(F8Type::FP8_INFINITY_MASK); }
/// Returns quiet NaN value
static F8Type quiet_NaN() { return F8Type::bitcast(F8Type::FP8_NAN); }
/// Returns signaling NaN value
static F8Type signaling_NaN() { return F8Type::bitcast(F8Type::FP8_NAN); }
/// Returns smallest positive subnormal value
static F8Type denorm_min() { return F8Type::bitcast(0x01); }
};
/// Numeric limits for float_e4m3_t
template <>
struct numeric_limits<cutlass::float_e4m3_t> :
public float8_base_numeric_limits<cutlass::float_e4m3_t> {
static bool const has_infinity = false;
/// Minimum finite value
static cutlass::float_e4m3_t lowest() { return cutlass::float_e4m3_t::bitcast(0xfe); }
/// Returns the machine epsilon (difference between 1 and the next representable value)
static cutlass::float_e4m3_t epsilon() { return cutlass::float_e4m3_t::bitcast(0x20); }
};
/// Numeric limits for float_e5m2_t
template <>
struct numeric_limits<cutlass::float_e5m2_t> :
public float8_base_numeric_limits<cutlass::float_e5m2_t> {
static bool const has_infinity = true;
/// Minimum finite value
static cutlass::float_e5m2_t lowest() { return cutlass::float_e5m2_t::bitcast(0xfb); }
/// Returns the machine epsilon (difference between 1 and the next representable value)
static cutlass::float_e5m2_t epsilon() { return cutlass::float_e5m2_t::bitcast(0x34); }
};
} // namespace std
#endif
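// Host-side usage sketch for the std:: specializations above (illustrative; the guard
// compiles them out under NVRTC):
//
//   cutlass::float_e4m3_t hi = std::numeric_limits<cutlass::float_e4m3_t>::max();     // 0x7e == 448
//   cutlass::float_e4m3_t lo = std::numeric_limits<cutlass::float_e4m3_t>::lowest();  // 0xfe == -448
//   static_assert(std::numeric_limits<cutlass::float_e5m2_t>::has_infinity, "E5M2 has +/-inf");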
namespace platform {
/// Numeric limits common to all float8 types
template <typename T>
struct float8_base_numeric_limits {
private:
using F8Type = T;
public:
static bool const is_specialized = true;
static bool const is_signed = true;
static bool const is_integer = false;
static bool const is_exact = false;
static bool const has_quiet_NaN = true;
static bool const has_signaling_NaN = false;
#if !defined(__CUDACC_RTC__)
static std::float_denorm_style const has_denorm = std::denorm_present;
#endif
static bool const has_denorm_loss = true;
#if !defined(__CUDACC_RTC__)
static std::float_round_style const round_style = std::round_to_nearest;
#endif
static bool const is_iec559 = false;
static bool const is_bounded = true;
static bool const is_modulo = false;
static int const digits = F8Type::FP8_NUM_MANTISSA_BITS;
/// Least positive value
static F8Type min() { return F8Type::bitcast(0x01); }
/// Maximum finite value
static F8Type max() { return F8Type::bitcast(F8Type::FP8_MAX_FLT); }
/// Returns maximum rounding error
static F8Type round_error() { return F8Type(0.5f); }
/// Returns positive infinity value
static F8Type infinity() { return F8Type::bitcast(F8Type::FP8_INFINITY_MASK); }
/// Returns quiet NaN value
static F8Type quiet_NaN() { return F8Type::bitcast(F8Type::FP8_NAN); }
/// Returns signaling NaN value
static F8Type signaling_NaN() { return F8Type::bitcast(F8Type::FP8_NAN); }
/// Returns smallest positive subnormal value
static F8Type denorm_min() { return F8Type::bitcast(0x01); }
};
/// Forward declaration of the platform analogue of std::numeric_limits
template <class T>
struct numeric_limits;
/// Numeric limits for float_e4m3_t
template <>
struct numeric_limits<cutlass::float_e4m3_t> :
public float8_base_numeric_limits<cutlass::float_e4m3_t> {
static bool const has_infinity = false;
/// Minimum finite value
static cutlass::float_e4m3_t lowest() { return cutlass::float_e4m3_t::bitcast(0xfe); }
/// Returns the machine epsilon (difference between 1 and the next representable value)
static cutlass::float_e4m3_t epsilon() { return cutlass::float_e4m3_t::bitcast(0x20); }
};
/// Numeric limits for float_e5m2_t
template <>
struct numeric_limits<cutlass::float_e5m2_t> :
public float8_base_numeric_limits<cutlass::float_e5m2_t> {
static bool const has_infinity = true;
/// Minimum finite value
static cutlass::float_e5m2_t lowest() { return cutlass::float_e5m2_t::bitcast(0xfb); }
/// Returns the machine epsilon (difference between 1 and the next representable value)
static cutlass::float_e5m2_t epsilon() { return cutlass::float_e5m2_t::bitcast(0x34); }
};
} // namespace platform
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// User-defined literals
//
CUTLASS_HOST_DEVICE
cutlass::float_e4m3_t operator "" _fe4m3(long double x) {
return cutlass::float_e4m3_t(float(x));
}
CUTLASS_HOST_DEVICE
cutlass::float_e4m3_t operator "" _fe4m3(unsigned long long int x) {
return cutlass::float_e4m3_t(int(x));
}
CUTLASS_HOST_DEVICE
cutlass::float_e5m2_t operator "" _fe5m2(long double x) {
return cutlass::float_e5m2_t(float(x));
}
CUTLASS_HOST_DEVICE
cutlass::float_e5m2_t operator "" _fe5m2(unsigned long long int x) {
return cutlass::float_e5m2_t(int(x));
}
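// Usage sketch for the literals above (illustrative):
//
//   auto a = 0.5_fe4m3;              // floating literal -> float_e4m3_t via the float constructor
//   auto b = 2_fe5m2;                // integer literal -> float_e5m2_t via the int constructor
//   float sum = float(a) + float(b); // 2.5f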
/////////////////////////////////////////////////////////////////////////////////////////////////
| 35,181 | C | 27.980231 | 111 | 0.584918 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/matrix.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
\file
\brief Matrix classes with value semantics.
*/
#pragma once
#if !defined(__CUDACC_RTC__)
#include <iosfwd>
#include <cmath>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/coord.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/matrix.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Primary template with partial specializations to follow
template <typename Element, int Rows, int Columns> struct Matrix;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 1-by-2 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 1, 2> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 1;
/// Number of columns in matrix
static int const kColumns = 2;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 2;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 1-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
/// Constructs a 1-by-2 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1
) {
data[0] = _0_0; data[1] = _0_1;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> transpose() const {
Matrix<Element, 2, 1> mt;
mt.data[0] = data[0];
mt.data[1] = data[1];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 1] = m.data[1];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> row(int i) const {
return slice_1x2(i, 0);
}
CUTLASS_HOST_DEVICE
Matrix &set_row(Matrix<Element, 1, 2> const &v, int i = 0) {
return set_slice_1x2(v, i, 0);
}
/// Forms a 1-by-2 matrix by horizontally concatenating an Element with an Element
CUTLASS_HOST_DEVICE
static Matrix hcat(Element lhs, Element rhs) {
return Matrix(
lhs, rhs);
}
/// Concatenates this matrix with an Element to form a 1-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> hcat(Element rhs) const {
return Matrix<Element, 1, 3>::hcat(*this, rhs);
}
/// Concatenates this matrix with a 1-by-2 matrix to form a 1-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> hcat(Matrix<Element, 1, 2> const & rhs) const {
return Matrix<Element, 1, 4>::hcat(*this, rhs);
}
/// Concatenates this matrix with a 1-by-2 matrix to form a 2-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> vcat(Matrix<Element, 1, 2> const & rhs) const {
return Matrix<Element, 2, 2>::vcat(*this, rhs);
}
/// Concatenates this matrix with a 2-by-2 matrix to form a 3-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> vcat(Matrix<Element, 2, 2> const & rhs) const {
return Matrix<Element, 3, 2>::vcat(*this, rhs);
}
/// Concatenates this matrix with a 3-by-2 matrix to form a 4-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> vcat(Matrix<Element, 3, 2> const & rhs) const {
return Matrix<Element, 4, 2>::vcat(*this, rhs);
}
/// Elementwise add operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
return result;
}
/// Elementwise add operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
return *this;
}
/// Elementwise subtract operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
return result;
}
/// Elementwise subtract operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
return *this;
}
/// Elementwise multiply operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
return result;
}
/// Scalar multiply operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
return result;
}
/// Scalar multiply operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
return *this;
}
/// Elementwise divide operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
return result;
}
/// Scalar divide operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
return result;
}
/// Scalar divide operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
return *this;
}
/// Elementwise divide operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (1-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
m.data[0] = -data[0];
m.data[1] = -data[1];
return m;
}
/// Matrix product of size 1-by-1-by-2
CUTLASS_HOST_DEVICE
Element product(Matrix<Element, 2, 1> const &rhs, Element accum = Element()) const {
// k=0
accum += data[0] * rhs.data[0];
// k=1
accum += data[1] * rhs.data[1];
return accum;
}
/// Matrix product of size 1-by-1-by-2
CUTLASS_HOST_DEVICE
Element operator*(Matrix<Element, 2, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 1-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> product(
Matrix<Element, 2, 2> const &rhs,
Matrix<Element, 1, 2> accum = Matrix<Element, 1, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
return accum;
}
/// Matrix product of size 1-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> operator*(Matrix<Element, 2, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 1-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 2, 2> const &rhs) {
*this = product(rhs);
return *this;
}
/// Matrix product of size 1-by-3-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> product(
Matrix<Element, 2, 3> const &rhs,
Matrix<Element, 1, 3> accum = Matrix<Element, 1, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
return accum;
}
/// Matrix product of size 1-by-3-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> operator*(Matrix<Element, 2, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 1-by-4-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> product(
Matrix<Element, 2, 4> const &rhs,
Matrix<Element, 1, 4> accum = Matrix<Element, 1, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
return accum;
}
/// Matrix product of size 1-by-4-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> operator*(Matrix<Element, 2, 4> const &rhs) const {
return product(rhs);
}
/// Dot product of vectors with extent 2
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 2, 1> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
return accum;
}
/// Dot product of vectors with extent 2
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 1, 2> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
return accum;
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
return accum;
}
};
/// Template alias for 1-by-2 matrix
template <typename Element>
using Matrix1x2 = Matrix<Element, 1, 2>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix1x2<Element> make_Matrix1x2(
Element _0_0, Element _0_1
) {
return Matrix1x2<Element>(
_0_0, _0_1
);
}
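// Usage sketch for Matrix1x2 (illustrative):
//
//   auto v  = cutlass::make_Matrix1x2(3.0f, 4.0f);  // row vector [3 4]
//   auto vt = v.transpose();                        // 2-by-1 column vector
//   float d = v * vt;                               // 1x2 * 2x1 product == dot(v, v) == 25
//   float m = v.magnitude();                        // square root of norm() == 5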
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 1-by-3 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 1, 3> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 1;
/// Number of columns in matrix
static int const kColumns = 3;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 3;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 1-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
/// Constructs a 1-by-3 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1, Element _0_2
) {
data[0] = _0_0; data[1] = _0_1; data[2] = _0_2;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> transpose() const {
Matrix<Element, 3, 1> mt;
mt.data[0] = data[0];
mt.data[1] = data[1];
mt.data[2] = data[2];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const {
Matrix<Element, 1, 3> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 2] = m.data[2];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> row(int i) const {
return slice_1x3(i, 0);
}
CUTLASS_HOST_DEVICE
Matrix &set_row(Matrix<Element, 1, 3> const &v, int i = 0) {
return set_slice_1x3(v, i, 0);
}
/// Forms a 1-by-3 matrix by horizontally concatenating an Element with a 1-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Element lhs, Matrix<Element, 1, 2> const & rhs) {
return Matrix(
lhs, rhs.at(0, 0), rhs.at(0, 1));
}
/// Forms a 1-by-3 matrix by horizontally concatenating a 1-by-2 matrix with an Element
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 1, 2> const & lhs, Element rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), rhs);
}
/// Concatenates this matrix with an Element to form a 1-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> hcat(Element rhs) const {
return Matrix<Element, 1, 4>::hcat(*this, rhs);
}
/// Concatenates this matrix with a 1-by-3 matrix to form a 2-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> vcat(Matrix<Element, 1, 3> const & rhs) const {
return Matrix<Element, 2, 3>::vcat(*this, rhs);
}
/// Concatenates this matrix with a 2-by-3 matrix to form a 3-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> vcat(Matrix<Element, 2, 3> const & rhs) const {
return Matrix<Element, 3, 3>::vcat(*this, rhs);
}
/// Concatenates this matrix with a 3-by-3 matrix to form a 4-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> vcat(Matrix<Element, 3, 3> const & rhs) const {
return Matrix<Element, 4, 3>::vcat(*this, rhs);
}
/// Elementwise add operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
return result;
}
/// Elementwise add operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
return *this;
}
/// Elementwise subtract operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
return result;
}
/// Elementwise subtract operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
return *this;
}
/// Elementwise multiply operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
return result;
}
/// Scalar multiply operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
return result;
}
/// Scalar multiply operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
return *this;
}
/// Elementwise divide operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
return result;
}
/// Scalar divide operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
return result;
}
/// Scalar divide operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
return *this;
}
/// Elementwise divide operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (1-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
m.data[0] = -data[0];
m.data[1] = -data[1];
m.data[2] = -data[2];
return m;
}
/// Matrix product of size 1-by-1-by-3
CUTLASS_HOST_DEVICE
Element product(Matrix<Element, 3, 1> const &rhs, Element accum = Element()) const {
// k=0
accum += data[0] * rhs.data[0];
// k=1
accum += data[1] * rhs.data[1];
// k=2
accum += data[2] * rhs.data[2];
return accum;
}
/// Matrix product of size 1-by-1-by-3
CUTLASS_HOST_DEVICE
Element operator*(Matrix<Element, 3, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 1-by-2-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> product(
Matrix<Element, 3, 2> const &rhs,
Matrix<Element, 1, 2> accum = Matrix<Element, 1, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
// k=2
accum.data[0] += data[2] * rhs.data[4];
accum.data[1] += data[2] * rhs.data[5];
return accum;
}
/// Matrix product of size 1-by-2-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> operator*(Matrix<Element, 3, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 1-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> product(
Matrix<Element, 3, 3> const &rhs,
Matrix<Element, 1, 3> accum = Matrix<Element, 1, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
// k=2
accum.data[0] += data[2] * rhs.data[6];
accum.data[1] += data[2] * rhs.data[7];
accum.data[2] += data[2] * rhs.data[8];
return accum;
}
/// Matrix product of size 1-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> operator*(Matrix<Element, 3, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 1-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 3, 3> const &rhs) {
*this = product(rhs);
return *this;
}
/// Matrix product of size 1-by-4-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> product(
Matrix<Element, 3, 4> const &rhs,
Matrix<Element, 1, 4> accum = Matrix<Element, 1, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
// k=2
accum.data[0] += data[2] * rhs.data[8];
accum.data[1] += data[2] * rhs.data[9];
accum.data[2] += data[2] * rhs.data[10];
accum.data[3] += data[2] * rhs.data[11];
return accum;
}
/// Matrix product of size 1-by-4-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> operator*(Matrix<Element, 3, 4> const &rhs) const {
return product(rhs);
}
/// Dot product of vectors with extent 3
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 3, 1> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
accum += data[2] * rhs.data[2];
return accum;
}
/// Dot product of vectors with extent 3
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 1, 3> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
accum += data[2] * rhs.data[2];
return accum;
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
return accum;
}
/// Cross product
CUTLASS_HOST_DEVICE
Matrix cross(Matrix const &rhs) const {
return Matrix(
data[1] * rhs.data[2] - data[2] * rhs.data[1],
data[2] * rhs.data[0] - data[0] * rhs.data[2],
data[0] * rhs.data[1] - data[1] * rhs.data[0]
);
}
};
/// Template alias for 1-by-3 matrix
template <typename Element>
using Matrix1x3 = Matrix<Element, 1, 3>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix1x3<Element> make_Matrix1x3(
Element _0_0, Element _0_1, Element _0_2
) {
return Matrix1x3<Element>(
_0_0, _0_1, _0_2
);
}
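// Usage sketch for Matrix1x3 (illustrative):
//
//   auto e1 = cutlass::make_Matrix1x3(1.0f, 0.0f, 0.0f);
//   auto e2 = cutlass::make_Matrix1x3(0.0f, 1.0f, 0.0f);
//   auto e3 = e1.cross(e2);   // right-handed cross product: (0, 0, 1)
//   float t = e1.dot(e2);     // orthogonal vectors -> 0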
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 1-by-4 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 1, 4> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 1;
/// Number of columns in matrix
static int const kColumns = 4;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 4;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 1-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
/// Constructs a 1-by-4 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1, Element _0_2, Element _0_3
) {
data[0] = _0_0; data[1] = _0_1; data[2] = _0_2; data[3] = _0_3;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
m.data[3] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> transpose() const {
Matrix<Element, 4, 1> mt;
mt.data[0] = data[0];
mt.data[1] = data[1];
mt.data[2] = data[2];
mt.data[3] = data[3];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const {
Matrix<Element, 1, 3> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> slice_1x4(int i = 0, int j = 0) const {
Matrix<Element, 1, 4> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 3];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x4(Matrix<Element, 1, 4> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 3] = m.data[3];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> row(int i) const {
return slice_1x4(i, 0);
}
CUTLASS_HOST_DEVICE
Matrix &set_row(Matrix<Element, 1, 4> const &v, int i = 0) {
return set_slice_1x4(v, i, 0);
}
/// Forms a 1-by-4 matrix by horizontally concatenating an Element with a 1-by-3 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Element lhs, Matrix<Element, 1, 3> const & rhs) {
return Matrix(
lhs, rhs.at(0, 0), rhs.at(0, 1), rhs.at(0, 2));
}
/// Forms a 1-by-4 matrix by horizontally concatenating a 1-by-2 matrix with a 1-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 1, 2> const & lhs, Matrix<Element, 1, 2> const & rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), rhs.at(0, 0), rhs.at(0, 1));
}
/// Forms a 1-by-4 matrix by horizontally concatenating a 1-by-3 matrix with an Element
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 1, 3> const & lhs, Element rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), lhs.at(0, 2), rhs);
}
/// Concatenates this matrix with a 1-by-4 matrix to form a 2-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> vcat(Matrix<Element, 1, 4> const & rhs) const {
return Matrix<Element, 2, 4>::vcat(*this, rhs);
}
/// Concatenates this matrix with a 2-by-4 matrix to form a 3-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> vcat(Matrix<Element, 2, 4> const & rhs) const {
return Matrix<Element, 3, 4>::vcat(*this, rhs);
}
/// Concatenates this matrix with a 3-by-4 matrix to form a 4-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> vcat(Matrix<Element, 3, 4> const & rhs) const {
return Matrix<Element, 4, 4>::vcat(*this, rhs);
}
/// Elementwise add operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
result.data[3] = data[3] + rhs.data[3];
return result;
}
/// Elementwise add operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
data[3] += rhs.data[3];
return *this;
}
/// Elementwise subtract operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
result.data[3] = data[3] - rhs.data[3];
return result;
}
/// Elementwise subtract operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
data[3] -= rhs.data[3];
return *this;
}
/// Elementwise multiply operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
result.data[3] = data[3] * rhs.data[3];
return result;
}
/// Scalar multiply operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
result.data[3] = data[3] * s;
return result;
}
/// Scalar multiply operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
data[3] *= s;
return *this;
}
/// Elementwise divide operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
result.data[3] = data[3] / rhs.data[3];
return result;
}
/// Scalar divide operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
result.data[3] = data[3] / s;
return result;
}
/// Scalar divide operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
data[3] /= s;
return *this;
}
/// Elementwise divide operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (1-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
data[3] /= rhs.data[3];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
m.data[0] = -data[0];
m.data[1] = -data[1];
m.data[2] = -data[2];
m.data[3] = -data[3];
return m;
}
/// Matrix product of size 1-by-1-by-4
CUTLASS_HOST_DEVICE
Element product(Matrix<Element, 4, 1> const &rhs, Element accum = Element()) const {
// k=0
accum += data[0] * rhs.data[0];
// k=1
accum += data[1] * rhs.data[1];
// k=2
accum += data[2] * rhs.data[2];
// k=3
accum += data[3] * rhs.data[3];
return accum;
}
/// Matrix product of size 1-by-1-by-4
CUTLASS_HOST_DEVICE
Element operator*(Matrix<Element, 4, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 1-by-2-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> product(
Matrix<Element, 4, 2> const &rhs,
Matrix<Element, 1, 2> accum = Matrix<Element, 1, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
// k=2
accum.data[0] += data[2] * rhs.data[4];
accum.data[1] += data[2] * rhs.data[5];
// k=3
accum.data[0] += data[3] * rhs.data[6];
accum.data[1] += data[3] * rhs.data[7];
return accum;
}
/// Matrix product of size 1-by-2-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> operator*(Matrix<Element, 4, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 1-by-3-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> product(
Matrix<Element, 4, 3> const &rhs,
Matrix<Element, 1, 3> accum = Matrix<Element, 1, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
// k=2
accum.data[0] += data[2] * rhs.data[6];
accum.data[1] += data[2] * rhs.data[7];
accum.data[2] += data[2] * rhs.data[8];
// k=3
accum.data[0] += data[3] * rhs.data[9];
accum.data[1] += data[3] * rhs.data[10];
accum.data[2] += data[3] * rhs.data[11];
return accum;
}
/// Matrix product of size 1-by-3-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> operator*(Matrix<Element, 4, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 1-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> product(
Matrix<Element, 4, 4> const &rhs,
Matrix<Element, 1, 4> accum = Matrix<Element, 1, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
// k=2
accum.data[0] += data[2] * rhs.data[8];
accum.data[1] += data[2] * rhs.data[9];
accum.data[2] += data[2] * rhs.data[10];
accum.data[3] += data[2] * rhs.data[11];
// k=3
accum.data[0] += data[3] * rhs.data[12];
accum.data[1] += data[3] * rhs.data[13];
accum.data[2] += data[3] * rhs.data[14];
accum.data[3] += data[3] * rhs.data[15];
return accum;
}
/// Matrix product of size 1-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> operator*(Matrix<Element, 4, 4> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 1-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 4, 4> const &rhs) {
*this = product(rhs);
return *this;
}
/// Dot product of vectors with extent 4
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 4, 1> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
accum += data[2] * rhs.data[2];
accum += data[3] * rhs.data[3];
return accum;
}
/// Dot product of vectors with extent 4
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 1, 4> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
accum += data[2] * rhs.data[2];
accum += data[3] * rhs.data[3];
return accum;
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
accum += data[3];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
accum += data[3] * data[3];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
return accum;
}
};
/// Template alias for 1-by-4 matrix
template <typename Element>
using Matrix1x4 = Matrix<Element, 1, 4>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix1x4<Element> make_Matrix1x4(
Element _0_0, Element _0_1, Element _0_2, Element _0_3
) {
return Matrix1x4<Element>(
_0_0, _0_1, _0_2, _0_3
);
}
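// Usage sketch (editorial example, not part of the generated API above): exercising the
// 1-by-4 row-vector operations declared in this struct. Assumes the surrounding
// namespace is in scope and Element = float.
//
//   Matrix1x4<float> r = make_Matrix1x4(1.f, 2.f, 3.f, 4.f);
//   float nrm = r.norm();          // 30.f  (sum of squared elements)
//   float mag = r.magnitude();     // fast_sqrt(30.f)
//   float dp  = r.dot(r);          // 30.f  (dot product of two 1-by-4 vectors)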
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 2-by-1 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 2, 1> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 2;
/// Number of columns in matrix
static int const kColumns = 1;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 2;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 2-by-1 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
/// Constructs a 2-by-1 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0,
Element _1_0
) {
data[0] = _0_0;
data[1] = _1_0;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> transpose() const {
Matrix<Element, 1, 2> mt;
mt.data[0] = data[0];
mt.data[1] = data[1];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 1 + j];  // row-major: stride equals kColumns
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 1 + j + 0];
m.data[1] = data[i * 1 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 1 + j + 0] = m.data[0];
data[i * 1 + j + 1] = m.data[1];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> column(int j) const {
return slice_2x1(0, j);
}
CUTLASS_HOST_DEVICE
Matrix &set_column(Matrix<Element, 2, 1> const &v, int j = 0) {
return set_slice_2x1(v, 0, j);
}
/// Concatenates this matrix with a 2-by-1 matrix to form a 2-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> hcat(Matrix<Element, 2, 1> const & rhs) const {
return Matrix<Element, 2, 2>::hcat(*this, rhs);
}
/// Concatenates this matrix with a 2-by-2 matrix to form a 2-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> hcat(Matrix<Element, 2, 2> const & rhs) const {
return Matrix<Element, 2, 3>::hcat(*this, rhs);
}
/// Concatenates this matrix with a 2-by-3 matrix to form a 2-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> hcat(Matrix<Element, 2, 3> const & rhs) const {
return Matrix<Element, 2, 4>::hcat(*this, rhs);
}
/// Forms a 2-by-1 matrix by vertically concatenating an Element with an Element
CUTLASS_HOST_DEVICE
static Matrix vcat(Element upper, Element lower) {
return Matrix(
upper
, lower);
}
/// Concatenates this matrix with an Element to form a 3-by-1 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> vcat(Element rhs) const {
return Matrix<Element, 3, 1>::vcat(*this, rhs);
}
/// Concatenates this matrix with a 2-by-1 matrix to form a 4-by-1 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> vcat(Matrix<Element, 2, 1> const & rhs) const {
return Matrix<Element, 4, 1>::vcat(*this, rhs);
}
/// Elementwise add operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
return result;
}
/// Elementwise add operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
return *this;
}
/// Elementwise subtract operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
return result;
}
/// Elementwise subtract operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
return *this;
}
/// Elementwise multiply operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
return result;
}
/// Scalar multiply operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
return result;
}
/// Scalar multiply operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
return *this;
}
/// Elementwise divide operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
return result;
}
/// Scalar divide operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
return result;
}
/// Scalar divide operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
return *this;
}
/// Elementwise divide operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (2-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
m.data[0] = -data[0];
m.data[1] = -data[1];
return m;
}
/// Matrix product of size 2-by-1-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> product(
Matrix<Element, 1, 1> const &rhs,
Matrix<Element, 2, 1> accum = Matrix<Element, 2, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[1] * rhs.data[0];
return accum;
}
/// Matrix product of size 2-by-1-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> operator*(Matrix<Element, 1, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-1-by-1
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 1, 1> const &rhs) {
*this = product(rhs);
return *this;
}
/// Matrix product of size 2-by-2-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> product(
Matrix<Element, 1, 2> const &rhs,
Matrix<Element, 2, 2> accum = Matrix<Element, 2, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[1] * rhs.data[0];
accum.data[3] += data[1] * rhs.data[1];
return accum;
}
/// Matrix product of size 2-by-2-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> operator*(Matrix<Element, 1, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-3-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> product(
Matrix<Element, 1, 3> const &rhs,
Matrix<Element, 2, 3> accum = Matrix<Element, 2, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[1] * rhs.data[0];
accum.data[4] += data[1] * rhs.data[1];
accum.data[5] += data[1] * rhs.data[2];
return accum;
}
/// Matrix product of size 2-by-3-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> operator*(Matrix<Element, 1, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-4-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> product(
Matrix<Element, 1, 4> const &rhs,
Matrix<Element, 2, 4> accum = Matrix<Element, 2, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[1] * rhs.data[0];
accum.data[5] += data[1] * rhs.data[1];
accum.data[6] += data[1] * rhs.data[2];
accum.data[7] += data[1] * rhs.data[3];
return accum;
}
/// Matrix product of size 2-by-4-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> operator*(Matrix<Element, 1, 4> const &rhs) const {
return product(rhs);
}
/// Dot product of vectors with extent 2
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 2, 1> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
return accum;
}
/// Dot product of vectors with extent 2
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 1, 2> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
return accum;
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
return accum;
}
};
/// Template alias for 2-by-1 matrix
template <typename Element>
using Matrix2x1 = Matrix<Element, 2, 1>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix2x1<Element> make_Matrix2x1(
Element _0_0,
Element _1_0
) {
return Matrix2x1<Element>(
_0_0,
_1_0
);
}
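// Usage sketch (editorial example): a 2-by-1 column vector, its transpose, and the
// 2-by-2-by-1 outer product declared above. Assumes the surrounding namespace is in
// scope and Element = float.
//
//   Matrix2x1<float> v = make_Matrix2x1(3.f, 4.f);
//   float len = v.magnitude();                      // 5.f
//   Matrix<float, 1, 2> vt = v.transpose();         // row vector [3, 4]
//   Matrix<float, 2, 2> outer = v * vt;             // [[9, 12], [12, 16]]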
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 2-by-2 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 2, 2> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 2;
/// Number of columns in matrix
static int const kColumns = 2;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 4;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 2-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
/// Constructs a 2-by-2 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1,
Element _1_0, Element _1_1
) {
data[0] = _0_0; data[1] = _0_1;
data[2] = _1_0; data[3] = _1_1;
}
/// Constructs a 2-by-2 matrix from row vectors
CUTLASS_HOST_DEVICE
Matrix(
Matrix<Element, 1, 2> const &row_0,
Matrix<Element, 1, 2> const &row_1
) {
data[0] = row_0.data[0];
data[1] = row_0.data[1];
data[2] = row_1.data[0];
data[3] = row_1.data[1];
}
/// Static method to construct a 2-by-2 matrix from column vectors
CUTLASS_HOST_DEVICE
static Matrix from_columns(
Matrix<Element, 2, 1> const &column_0,
Matrix<Element, 2, 1> const &column_1
) {
Matrix result;
result.data[0] = column_0.data[0];
result.data[1] = column_1.data[0];
result.data[2] = column_0.data[1];
result.data[3] = column_1.data[1];
return result;
}
/// Constructs an identity matrix
CUTLASS_HOST_DEVICE
static Matrix identity() {
Matrix m;
m.data[0] = Element(1);
m.data[3] = Element(1);
return m;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
m.data[3] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 2, 1> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[3] = diag.data[1];
return m;
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 1, 2> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[3] = diag.data[1];
return m;
}
/// Gets an array of diagonal elements
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> diagonal() const {
Matrix<Element, 2, 1> diag;
diag.data[0] = data[0];
diag.data[1] = data[3];
return diag;
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> transpose() const {
Matrix<Element, 2, 2> mt;
mt.data[0] = data[0];
mt.data[2] = data[1];
mt.data[1] = data[2];
mt.data[3] = data[3];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 2 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 2 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 1] = m.data[1];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> row(int i) const {
return slice_1x2(i, 0);
}
CUTLASS_HOST_DEVICE
Matrix &set_row(Matrix<Element, 1, 2> const &v, int i = 0) {
return set_slice_1x2(v, i, 0);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 2] = m.data[1];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> column(int j) const {
return slice_2x1(0, j);
}
CUTLASS_HOST_DEVICE
Matrix &set_column(Matrix<Element, 2, 1> const &v, int j = 0) {
return set_slice_2x1(v, 0, j);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const {
Matrix<Element, 2, 2> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 1];
m.data[2] = data[i * 2 + j + 2];
m.data[3] = data[i * 2 + j + 3];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 1] = m.data[1];
data[i * 2 + j + 2] = m.data[2];
data[i * 2 + j + 3] = m.data[3];
return *this;
}
/// Forms a 2-by-2 matrix by horizontally concatenating a 2-by-1 matrix with a 2-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 2, 1> const & lhs, Matrix<Element, 2, 1> const & rhs) {
return Matrix(
lhs.at(0, 0), rhs.at(0, 0)
, lhs.at(1, 0), rhs.at(1, 0));
}
/// Concatenates this matrix with a 2-by-1 matrix to form a 2-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> hcat(Matrix<Element, 2, 1> const & rhs) const {
return Matrix<Element, 2, 3>::hcat(*this, rhs);
}
/// Concatenates this matrix with a 2-by-2 matrix to form a 2-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> hcat(Matrix<Element, 2, 2> const & rhs) const {
return Matrix<Element, 2, 4>::hcat(*this, rhs);
}
/// Forms a 2-by-2 matrix by vertically concatenating a 1-by-2 matrix with a 1-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 1, 2> const & upper, Matrix<Element, 1, 2> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1)
, lower.at(0, 0), lower.at(0, 1));
}
/// Concatenates this matrix with a 1-by-2 matrix to form a 3-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> vcat(Matrix<Element, 1, 2> const & rhs) const {
return Matrix<Element, 3, 2>::vcat(*this, rhs);
}
/// Concatenates this matrix with a 2-by-2 matrix to form a 4-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> vcat(Matrix<Element, 2, 2> const & rhs) const {
return Matrix<Element, 4, 2>::vcat(*this, rhs);
}
/// Forms a 2-by-2 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Element A, Element B,
Element C, Element D) {
return Matrix(
A, B
, C, D
);
}
/// Elementwise add operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
result.data[3] = data[3] + rhs.data[3];
return result;
}
/// Elementwise add operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
data[3] += rhs.data[3];
return *this;
}
/// Elementwise subtract operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
result.data[3] = data[3] - rhs.data[3];
return result;
}
/// Elementwise subtract operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
data[3] -= rhs.data[3];
return *this;
}
/// Elementwise multiply operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
result.data[3] = data[3] * rhs.data[3];
return result;
}
/// Scalar multiply operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
result.data[3] = data[3] * s;
return result;
}
/// Scalar multiply operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
data[3] *= s;
return *this;
}
/// Elementwise divide operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
result.data[3] = data[3] / rhs.data[3];
return result;
}
/// Scalar divide operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
result.data[3] = data[3] / s;
return result;
}
/// Scalar divide operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
data[3] /= s;
return *this;
}
/// Elementwise divide operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (2-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
data[3] /= rhs.data[3];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
m.data[0] = -data[0];
m.data[1] = -data[1];
m.data[2] = -data[2];
m.data[3] = -data[3];
return m;
}
/// Matrix product of size 2-by-1-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> product(
Matrix<Element, 2, 1> const &rhs,
Matrix<Element, 2, 1> accum = Matrix<Element, 2, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[2] * rhs.data[0];
// k=1
accum.data[0] += data[1] * rhs.data[1];
accum.data[1] += data[3] * rhs.data[1];
return accum;
}
/// Matrix product of size 2-by-1-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> operator*(Matrix<Element, 2, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> product(
Matrix<Element, 2, 2> const &rhs,
Matrix<Element, 2, 2> accum = Matrix<Element, 2, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[2] * rhs.data[0];
accum.data[3] += data[2] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
accum.data[2] += data[3] * rhs.data[2];
accum.data[3] += data[3] * rhs.data[3];
return accum;
}
/// Matrix product of size 2-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> operator*(Matrix<Element, 2, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 2, 2> const &rhs) {
*this = product(rhs);
return *this;
}
/// Matrix product of size 2-by-3-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> product(
Matrix<Element, 2, 3> const &rhs,
Matrix<Element, 2, 3> accum = Matrix<Element, 2, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[2] * rhs.data[0];
accum.data[4] += data[2] * rhs.data[1];
accum.data[5] += data[2] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
accum.data[3] += data[3] * rhs.data[3];
accum.data[4] += data[3] * rhs.data[4];
accum.data[5] += data[3] * rhs.data[5];
return accum;
}
/// Matrix product of size 2-by-3-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> operator*(Matrix<Element, 2, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-4-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> product(
Matrix<Element, 2, 4> const &rhs,
Matrix<Element, 2, 4> accum = Matrix<Element, 2, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[2] * rhs.data[0];
accum.data[5] += data[2] * rhs.data[1];
accum.data[6] += data[2] * rhs.data[2];
accum.data[7] += data[2] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
accum.data[4] += data[3] * rhs.data[4];
accum.data[5] += data[3] * rhs.data[5];
accum.data[6] += data[3] * rhs.data[6];
accum.data[7] += data[3] * rhs.data[7];
return accum;
}
/// Matrix product of size 2-by-4-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> operator*(Matrix<Element, 2, 4> const &rhs) const {
return product(rhs);
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
accum += data[3];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
accum += data[3] * data[3];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
accum += data[3];
return accum;
}
/// Returns 2-by-2 rotation matrix
CUTLASS_HOST_DEVICE
static Matrix rotation(Element theta) {
Element c = fast_cos(theta);
Element s = fast_sin(theta);
return Matrix(
c, -s,
s, c
);
}
/// Computes the determinant of a 2-by-2 matrix
CUTLASS_HOST_DEVICE
Element determinant(Element accum = Element()) const {
accum += data[0] * data[3] - data[1] * data[2];
return accum;
}
/// Computes the inverse of a 2-by-2 matrix given
/// the matrix's determinant
CUTLASS_HOST_DEVICE
Matrix inverse(Element det) const {
return Matrix(
data[3], -data[1],
-data[2], data[0]
) * (Element(1) / det);
}
/// Computes the inverse of a 2-by-2 matrix.
CUTLASS_HOST_DEVICE
Matrix inverse() const {
return inverse(determinant());
}
};
/// Template alias for 2-by-2 matrix
template <typename Element>
using Matrix2x2 = Matrix<Element, 2, 2>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix2x2<Element> make_Matrix2x2(
Element _0_0, Element _0_1,
Element _1_0, Element _1_1
) {
return Matrix2x2<Element>(
_0_0, _0_1,
_1_0, _1_1
);
}
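// Usage sketch (editorial example): composing the 2-by-2 helpers declared above. A pure
// rotation has determinant 1, and multiplying by its inverse recovers the identity up to
// rounding. Assumes the surrounding namespace is in scope and Element = float.
//
//   Matrix2x2<float> R = Matrix2x2<float>::rotation(0.5f);
//   float det = R.determinant();                    // ~1.f
//   Matrix2x2<float> I = R * R.inverse();           // ~Matrix2x2<float>::identity()
//   Matrix2x1<float> v = make_Matrix2x1(1.f, 0.f);
//   Matrix2x1<float> w = R * v;                     // v rotated by 0.5 radians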
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 2-by-3 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 2, 3> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 2;
/// Number of columns in matrix
static int const kColumns = 3;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 6;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 2-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
/// Constructs a 2-by-3 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1, Element _0_2,
Element _1_0, Element _1_1, Element _1_2
) {
data[0] = _0_0; data[1] = _0_1; data[2] = _0_2;
data[3] = _1_0; data[4] = _1_1; data[5] = _1_2;
}
/// Constructs a 2-by-3 matrix from row vectors
CUTLASS_HOST_DEVICE
Matrix(
Matrix<Element, 1, 3> const &row_0,
Matrix<Element, 1, 3> const &row_1
) {
data[0] = row_0.data[0];
data[1] = row_0.data[1];
data[2] = row_0.data[2];
data[3] = row_1.data[0];
data[4] = row_1.data[1];
data[5] = row_1.data[2];
}
/// Static method to construct a 2-by-3 matrix from column vectors
CUTLASS_HOST_DEVICE
static Matrix from_columns(
Matrix<Element, 3, 1> const &column_0,
Matrix<Element, 3, 1> const &column_1,
Matrix<Element, 3, 1> const &column_2
) {
Matrix result;
result.data[0] = column_0.data[0];
result.data[1] = column_1.data[0];
result.data[2] = column_2.data[0];
result.data[3] = column_0.data[1];
result.data[4] = column_1.data[1];
result.data[5] = column_2.data[1];
return result;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
m.data[3] = s;
m.data[4] = s;
m.data[5] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 2, 1> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[4] = diag.data[1];
return m;
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 1, 2> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[4] = diag.data[1];
return m;
}
/// Gets an array of diagonal elements
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> diagonal() const {
Matrix<Element, 2, 1> diag;
diag.data[0] = data[0];
diag.data[1] = data[4];
return diag;
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> transpose() const {
Matrix<Element, 3, 2> mt;
mt.data[0] = data[0];
mt.data[2] = data[1];
mt.data[4] = data[2];
mt.data[1] = data[3];
mt.data[3] = data[4];
mt.data[5] = data[5];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 3 + j];  // row-major: stride equals kColumns
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 3 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const {
Matrix<Element, 1, 3> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 2] = m.data[2];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> row(int i) const {
return slice_1x3(i, 0);
}
CUTLASS_HOST_DEVICE
Matrix &set_row(Matrix<Element, 1, 3> const &v, int i = 0) {
return set_slice_1x3(v, i, 0);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 3];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 3] = m.data[1];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> column(int j) const {
return slice_2x1(0, j);
}
CUTLASS_HOST_DEVICE
Matrix &set_column(Matrix<Element, 2, 1> const &v, int j = 0) {
return set_slice_2x1(v, 0, j);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const {
Matrix<Element, 2, 2> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 3];
m.data[3] = data[i * 3 + j + 4];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 3] = m.data[2];
data[i * 3 + j + 4] = m.data[3];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> slice_2x3(int i = 0, int j = 0) const {
Matrix<Element, 2, 3> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 2];
m.data[3] = data[i * 3 + j + 3];
m.data[4] = data[i * 3 + j + 4];
m.data[5] = data[i * 3 + j + 5];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x3(Matrix<Element, 2, 3> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 2] = m.data[2];
data[i * 3 + j + 3] = m.data[3];
data[i * 3 + j + 4] = m.data[4];
data[i * 3 + j + 5] = m.data[5];
return *this;
}
/// Forms a 2-by-3 matrix by horizontally concatenating a 2-by-1 matrix with a 2-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 2, 1> const & lhs, Matrix<Element, 2, 2> const & rhs) {
return Matrix(
lhs.at(0, 0), rhs.at(0, 0), rhs.at(0, 1)
, lhs.at(1, 0), rhs.at(1, 0), rhs.at(1, 1));
}
/// Forms a 2-by-3 matrix by horizontally concatenating a 2-by-2 matrix with a 2-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 2, 2> const & lhs, Matrix<Element, 2, 1> const & rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), rhs.at(0, 0)
, lhs.at(1, 0), lhs.at(1, 1), rhs.at(1, 0));
}
/// Concatenates this matrix with a 2-by-1 matrix to form a 2-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> hcat(Matrix<Element, 2, 1> const & rhs) const {
return Matrix<Element, 2, 4>::hcat(*this, rhs);
}
/// Forms a 2-by-3 matrix by vertically concatenating a 1-by-3 matrix with a 1-by-3 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 1, 3> const & upper, Matrix<Element, 1, 3> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2));
}
/// Concatenates this matrix with a 1-by-3 matrix to form a 3-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> vcat(Matrix<Element, 1, 3> const & rhs) const {
return Matrix<Element, 3, 3>::vcat(*this, rhs);
}
/// Concatenates this matrix with a 2-by-3 matrix to form a 4-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> vcat(Matrix<Element, 2, 3> const & rhs) const {
return Matrix<Element, 4, 3>::vcat(*this, rhs);
}
/// Forms a 2-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Element A, Matrix<Element, 1, 2> const & B,
Element C, Matrix<Element, 1, 2> const & D) {
return Matrix(
A, B.at(0, 0), B.at(0, 1)
, C, D.at(0, 0), D.at(0, 1)
);
}
/// Forms a 2-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 1, 2> const & A, Element B,
Matrix<Element, 1, 2> const & C, Element D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B
, C.at(0, 0), C.at(0, 1), D
);
}
/// Elementwise add operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
result.data[3] = data[3] + rhs.data[3];
result.data[4] = data[4] + rhs.data[4];
result.data[5] = data[5] + rhs.data[5];
return result;
}
/// Elementwise add operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
data[3] += rhs.data[3];
data[4] += rhs.data[4];
data[5] += rhs.data[5];
return *this;
}
/// Elementwise subtract operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
result.data[3] = data[3] - rhs.data[3];
result.data[4] = data[4] - rhs.data[4];
result.data[5] = data[5] - rhs.data[5];
return result;
}
/// Elementwise subtract operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
data[3] -= rhs.data[3];
data[4] -= rhs.data[4];
data[5] -= rhs.data[5];
return *this;
}
/// Elementwise multiply operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
result.data[3] = data[3] * rhs.data[3];
result.data[4] = data[4] * rhs.data[4];
result.data[5] = data[5] * rhs.data[5];
return result;
}
/// Scalar multiply operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
result.data[3] = data[3] * s;
result.data[4] = data[4] * s;
result.data[5] = data[5] * s;
return result;
}
/// Scalar multiply operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
data[3] *= s;
data[4] *= s;
data[5] *= s;
return *this;
}
/// Elementwise divide operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
result.data[3] = data[3] / rhs.data[3];
result.data[4] = data[4] / rhs.data[4];
result.data[5] = data[5] / rhs.data[5];
return result;
}
/// Scalar divide operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
result.data[3] = data[3] / s;
result.data[4] = data[4] / s;
result.data[5] = data[5] / s;
return result;
}
/// Scalar divide operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
data[3] /= s;
data[4] /= s;
data[5] /= s;
return *this;
}
/// Elementwise divide operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (2-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
data[3] /= rhs.data[3];
data[4] /= rhs.data[4];
data[5] /= rhs.data[5];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
m.data[0] = -data[0];
m.data[1] = -data[1];
m.data[2] = -data[2];
m.data[3] = -data[3];
m.data[4] = -data[4];
m.data[5] = -data[5];
return m;
}
/// Matrix product of size 2-by-1-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> product(
Matrix<Element, 3, 1> const &rhs,
Matrix<Element, 2, 1> accum = Matrix<Element, 2, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[3] * rhs.data[0];
// k=1
accum.data[0] += data[1] * rhs.data[1];
accum.data[1] += data[4] * rhs.data[1];
// k=2
accum.data[0] += data[2] * rhs.data[2];
accum.data[1] += data[5] * rhs.data[2];
return accum;
}
/// Matrix product of size 2-by-1-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> operator*(Matrix<Element, 3, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-2-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> product(
Matrix<Element, 3, 2> const &rhs,
Matrix<Element, 2, 2> accum = Matrix<Element, 2, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[3] * rhs.data[0];
accum.data[3] += data[3] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
accum.data[2] += data[4] * rhs.data[2];
accum.data[3] += data[4] * rhs.data[3];
// k=2
accum.data[0] += data[2] * rhs.data[4];
accum.data[1] += data[2] * rhs.data[5];
accum.data[2] += data[5] * rhs.data[4];
accum.data[3] += data[5] * rhs.data[5];
return accum;
}
/// Matrix product of size 2-by-2-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> operator*(Matrix<Element, 3, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> product(
Matrix<Element, 3, 3> const &rhs,
Matrix<Element, 2, 3> accum = Matrix<Element, 2, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[3] * rhs.data[0];
accum.data[4] += data[3] * rhs.data[1];
accum.data[5] += data[3] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
accum.data[3] += data[4] * rhs.data[3];
accum.data[4] += data[4] * rhs.data[4];
accum.data[5] += data[4] * rhs.data[5];
// k=2
accum.data[0] += data[2] * rhs.data[6];
accum.data[1] += data[2] * rhs.data[7];
accum.data[2] += data[2] * rhs.data[8];
accum.data[3] += data[5] * rhs.data[6];
accum.data[4] += data[5] * rhs.data[7];
accum.data[5] += data[5] * rhs.data[8];
return accum;
}
/// Matrix product of size 2-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> operator*(Matrix<Element, 3, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 3, 3> const &rhs) {
*this = product(rhs);
return *this;
}
/// Matrix product of size 2-by-4-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> product(
Matrix<Element, 3, 4> const &rhs,
Matrix<Element, 2, 4> accum = Matrix<Element, 2, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[3] * rhs.data[0];
accum.data[5] += data[3] * rhs.data[1];
accum.data[6] += data[3] * rhs.data[2];
accum.data[7] += data[3] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
accum.data[4] += data[4] * rhs.data[4];
accum.data[5] += data[4] * rhs.data[5];
accum.data[6] += data[4] * rhs.data[6];
accum.data[7] += data[4] * rhs.data[7];
// k=2
accum.data[0] += data[2] * rhs.data[8];
accum.data[1] += data[2] * rhs.data[9];
accum.data[2] += data[2] * rhs.data[10];
accum.data[3] += data[2] * rhs.data[11];
accum.data[4] += data[5] * rhs.data[8];
accum.data[5] += data[5] * rhs.data[9];
accum.data[6] += data[5] * rhs.data[10];
accum.data[7] += data[5] * rhs.data[11];
return accum;
}
/// Matrix product of size 2-by-4-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> operator*(Matrix<Element, 3, 4> const &rhs) const {
return product(rhs);
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
accum += data[3];
accum += data[4];
accum += data[5];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
accum += data[3] * data[3];
accum += data[4] * data[4];
accum += data[5] * data[5];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
accum += data[4];
return accum;
}
};
/// Template alias for 2-by-3 matrix
template <typename Element>
using Matrix2x3 = Matrix<Element, 2, 3>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix2x3<Element> make_Matrix2x3(
Element _0_0, Element _0_1, Element _0_2,
Element _1_0, Element _1_1, Element _1_2
) {
return Matrix2x3<Element>(
_0_0, _0_1, _0_2,
_1_0, _1_1, _1_2
);
}
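// Usage sketch (editorial example): a 2-by-3 matrix viewed as a 2-D affine transform
// (2-by-2 linear part plus a translation column) applied to a homogeneous point. The
// 3-by-1 operand is built with the vcat helper declared on Matrix<Element, 2, 1> above;
// the surrounding namespace is assumed to be in scope and Element = float.
//
//   Matrix2x3<float> A = make_Matrix2x3(1.f, 0.f, 2.f,
//                                       0.f, 1.f, 3.f);
//   Matrix<float, 3, 1> p = make_Matrix2x1(4.f, 5.f).vcat(1.f);  // [4, 5, 1]^T
//   Matrix2x1<float> q = A * p;             // 2-by-1-by-3 product: [6, 8]^T
//   Matrix<float, 3, 2> At = A.transpose();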
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 2-by-4 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 2, 4> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 2;
/// Number of columns in matrix
static int const kColumns = 4;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 8;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 2-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
/// Constructs a 2-by-4 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1, Element _0_2, Element _0_3,
Element _1_0, Element _1_1, Element _1_2, Element _1_3
) {
data[0] = _0_0; data[1] = _0_1; data[2] = _0_2; data[3] = _0_3;
data[4] = _1_0; data[5] = _1_1; data[6] = _1_2; data[7] = _1_3;
}
/// Constructs a 2-by-4 matrix from row vectors
CUTLASS_HOST_DEVICE
Matrix(
Matrix<Element, 1, 4> const &row_0,
Matrix<Element, 1, 4> const &row_1
) {
data[0] = row_0.data[0];
data[1] = row_0.data[1];
data[2] = row_0.data[2];
data[3] = row_0.data[3];
data[4] = row_1.data[0];
data[5] = row_1.data[1];
data[6] = row_1.data[2];
data[7] = row_1.data[3];
}
/// Static method to construct a 2-by-4 matrix from column vectors
CUTLASS_HOST_DEVICE
static Matrix from_columns(
Matrix<Element, 4, 1> const &column_0,
Matrix<Element, 4, 1> const &column_1,
Matrix<Element, 4, 1> const &column_2,
Matrix<Element, 4, 1> const &column_3
) {
Matrix result;
result.data[0] = column_0.data[0];
result.data[1] = column_1.data[0];
result.data[2] = column_2.data[0];
result.data[3] = column_3.data[0];
result.data[4] = column_0.data[1];
result.data[5] = column_1.data[1];
result.data[6] = column_2.data[1];
result.data[7] = column_3.data[1];
return result;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
m.data[3] = s;
m.data[4] = s;
m.data[5] = s;
m.data[6] = s;
m.data[7] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 2, 1> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[5] = diag.data[1];
return m;
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 1, 2> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[5] = diag.data[1];
return m;
}
/// Gets an array of diagonal elements
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> diagonal() const {
Matrix<Element, 2, 1> diag;
diag.data[0] = data[0];
diag.data[1] = data[5];
return diag;
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> transpose() const {
Matrix<Element, 4, 2> mt;
mt.data[0] = data[0];
mt.data[2] = data[1];
mt.data[4] = data[2];
mt.data[6] = data[3];
mt.data[1] = data[4];
mt.data[3] = data[5];
mt.data[5] = data[6];
mt.data[7] = data[7];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 4 + j];  // row-major: stride equals kColumns
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 4 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const {
Matrix<Element, 1, 3> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> slice_1x4(int i = 0, int j = 0) const {
Matrix<Element, 1, 4> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 3];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x4(Matrix<Element, 1, 4> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 3] = m.data[3];
return *this;
}
/// Gets the i-th row as a 1-by-4 row vector
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> row(int i) const {
return slice_1x4(i, 0);
}
/// Overwrites the i-th row with a 1-by-4 row vector
CUTLASS_HOST_DEVICE
Matrix &set_row(Matrix<Element, 1, 4> const &v, int i = 0) {
return set_slice_1x4(v, i, 0);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 4];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 4] = m.data[1];
return *this;
}
/// Gets the j-th column as a 2-by-1 column vector
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> column(int j) const {
return slice_2x1(0, j);
}
/// Overwrites the j-th column with a 2-by-1 column vector
CUTLASS_HOST_DEVICE
Matrix &set_column(Matrix<Element, 2, 1> const &v, int j = 0) {
return set_slice_2x1(v, 0, j);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const {
Matrix<Element, 2, 2> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 4];
m.data[3] = data[i * 4 + j + 5];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 4] = m.data[2];
data[i * 4 + j + 5] = m.data[3];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> slice_2x3(int i = 0, int j = 0) const {
Matrix<Element, 2, 3> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 4];
m.data[4] = data[i * 4 + j + 5];
m.data[5] = data[i * 4 + j + 6];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x3(Matrix<Element, 2, 3> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 4] = m.data[3];
data[i * 4 + j + 5] = m.data[4];
data[i * 4 + j + 6] = m.data[5];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> slice_2x4(int i = 0, int j = 0) const {
Matrix<Element, 2, 4> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 3];
m.data[4] = data[i * 4 + j + 4];
m.data[5] = data[i * 4 + j + 5];
m.data[6] = data[i * 4 + j + 6];
m.data[7] = data[i * 4 + j + 7];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x4(Matrix<Element, 2, 4> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 3] = m.data[3];
data[i * 4 + j + 4] = m.data[4];
data[i * 4 + j + 5] = m.data[5];
data[i * 4 + j + 6] = m.data[6];
data[i * 4 + j + 7] = m.data[7];
return *this;
}
/// Forms a 2-by-4 matrix by horizontally concatenating a 2-by-1 matrix with a 2-by-3 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 2, 1> const & lhs, Matrix<Element, 2, 3> const & rhs) {
return Matrix(
lhs.at(0, 0), rhs.at(0, 0), rhs.at(0, 1), rhs.at(0, 2)
, lhs.at(1, 0), rhs.at(1, 0), rhs.at(1, 1), rhs.at(1, 2));
}
/// Forms a 2-by-4 matrix by horizontally concatenating a 2-by-2 matrix with a 2-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 2, 2> const & lhs, Matrix<Element, 2, 2> const & rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), rhs.at(0, 0), rhs.at(0, 1)
, lhs.at(1, 0), lhs.at(1, 1), rhs.at(1, 0), rhs.at(1, 1));
}
/// Forms a 2-by-4 matrix by horizontally concatenating a 2-by-3 matrix with a 2-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 2, 3> const & lhs, Matrix<Element, 2, 1> const & rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), lhs.at(0, 2), rhs.at(0, 0)
, lhs.at(1, 0), lhs.at(1, 1), lhs.at(1, 2), rhs.at(1, 0));
}
/// Forms a 2-by-4 matrix by vertically concatenating a 1-by-4 matrix with a 1-by-4 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 1, 4> const & upper, Matrix<Element, 1, 4> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2), upper.at(0, 3)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2), lower.at(0, 3));
}
/// Concatenates this matrix with a 1-by-4 matrix to form a 3-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> vcat(Matrix<Element, 1, 4> const & rhs) const {
return Matrix<Element, 3, 4>::vcat(*this, rhs);
}
/// Concatenates this matrix with a 2-by-4 matrix to form a 4-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> vcat(Matrix<Element, 2, 4> const & rhs) const {
return Matrix<Element, 4, 4>::vcat(*this, rhs);
}
/// Forms a 2-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Element A, Matrix<Element, 1, 3> const & B,
Element C, Matrix<Element, 1, 3> const & D) {
return Matrix(
A, B.at(0, 0), B.at(0, 1), B.at(0, 2)
, C, D.at(0, 0), D.at(0, 1), D.at(0, 2)
);
}
/// Forms a 2-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 1, 2> const & A, Matrix<Element, 1, 2> const & B,
Matrix<Element, 1, 2> const & C, Matrix<Element, 1, 2> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B.at(0, 0), B.at(0, 1)
, C.at(0, 0), C.at(0, 1), D.at(0, 0), D.at(0, 1)
);
}
/// Forms a 2-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 1, 3> const & A, Element B,
Matrix<Element, 1, 3> const & C, Element D) {
return Matrix(
A.at(0, 0), A.at(0, 1), A.at(0, 2), B
, C.at(0, 0), C.at(0, 1), C.at(0, 2), D
);
}
/// Elementwise add operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
result.data[3] = data[3] + rhs.data[3];
result.data[4] = data[4] + rhs.data[4];
result.data[5] = data[5] + rhs.data[5];
result.data[6] = data[6] + rhs.data[6];
result.data[7] = data[7] + rhs.data[7];
return result;
}
/// Elementwise add operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
data[3] += rhs.data[3];
data[4] += rhs.data[4];
data[5] += rhs.data[5];
data[6] += rhs.data[6];
data[7] += rhs.data[7];
return *this;
}
/// Elementwise subtract operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
result.data[3] = data[3] - rhs.data[3];
result.data[4] = data[4] - rhs.data[4];
result.data[5] = data[5] - rhs.data[5];
result.data[6] = data[6] - rhs.data[6];
result.data[7] = data[7] - rhs.data[7];
return result;
}
/// Elementwise subtract operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
data[3] -= rhs.data[3];
data[4] -= rhs.data[4];
data[5] -= rhs.data[5];
data[6] -= rhs.data[6];
data[7] -= rhs.data[7];
return *this;
}
/// Elementwise multiply operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
result.data[3] = data[3] * rhs.data[3];
result.data[4] = data[4] * rhs.data[4];
result.data[5] = data[5] * rhs.data[5];
result.data[6] = data[6] * rhs.data[6];
result.data[7] = data[7] * rhs.data[7];
return result;
}
/// Scalar multiply operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
result.data[3] = data[3] * s;
result.data[4] = data[4] * s;
result.data[5] = data[5] * s;
result.data[6] = data[6] * s;
result.data[7] = data[7] * s;
return result;
}
/// Scalar multiply operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
data[3] *= s;
data[4] *= s;
data[5] *= s;
data[6] *= s;
data[7] *= s;
return *this;
}
/// Elementwise divide operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
result.data[3] = data[3] / rhs.data[3];
result.data[4] = data[4] / rhs.data[4];
result.data[5] = data[5] / rhs.data[5];
result.data[6] = data[6] / rhs.data[6];
result.data[7] = data[7] / rhs.data[7];
return result;
}
/// Scalar divide operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
result.data[3] = data[3] / s;
result.data[4] = data[4] / s;
result.data[5] = data[5] / s;
result.data[6] = data[6] / s;
result.data[7] = data[7] / s;
return result;
}
/// Scalar divide operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
data[3] /= s;
data[4] /= s;
data[5] /= s;
data[6] /= s;
data[7] /= s;
return *this;
}
/// Elementwise divide operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (2-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
data[3] /= rhs.data[3];
data[4] /= rhs.data[4];
data[5] /= rhs.data[5];
data[6] /= rhs.data[6];
data[7] /= rhs.data[7];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
m.data[0] = -data[0];
m.data[1] = -data[1];
m.data[2] = -data[2];
m.data[3] = -data[3];
m.data[4] = -data[4];
m.data[5] = -data[5];
m.data[6] = -data[6];
m.data[7] = -data[7];
return m;
}
/// Matrix product of size 2-by-1-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> product(
Matrix<Element, 4, 1> const &rhs,
Matrix<Element, 2, 1> accum = Matrix<Element, 2, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[4] * rhs.data[0];
// k=1
accum.data[0] += data[1] * rhs.data[1];
accum.data[1] += data[5] * rhs.data[1];
// k=2
accum.data[0] += data[2] * rhs.data[2];
accum.data[1] += data[6] * rhs.data[2];
// k=3
accum.data[0] += data[3] * rhs.data[3];
accum.data[1] += data[7] * rhs.data[3];
return accum;
}
/// Matrix product of size 2-by-1-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> operator*(Matrix<Element, 4, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-2-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> product(
Matrix<Element, 4, 2> const &rhs,
Matrix<Element, 2, 2> accum = Matrix<Element, 2, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[4] * rhs.data[0];
accum.data[3] += data[4] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
accum.data[2] += data[5] * rhs.data[2];
accum.data[3] += data[5] * rhs.data[3];
// k=2
accum.data[0] += data[2] * rhs.data[4];
accum.data[1] += data[2] * rhs.data[5];
accum.data[2] += data[6] * rhs.data[4];
accum.data[3] += data[6] * rhs.data[5];
// k=3
accum.data[0] += data[3] * rhs.data[6];
accum.data[1] += data[3] * rhs.data[7];
accum.data[2] += data[7] * rhs.data[6];
accum.data[3] += data[7] * rhs.data[7];
return accum;
}
/// Matrix product of size 2-by-2-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> operator*(Matrix<Element, 4, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-3-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> product(
Matrix<Element, 4, 3> const &rhs,
Matrix<Element, 2, 3> accum = Matrix<Element, 2, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[4] * rhs.data[0];
accum.data[4] += data[4] * rhs.data[1];
accum.data[5] += data[4] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
accum.data[3] += data[5] * rhs.data[3];
accum.data[4] += data[5] * rhs.data[4];
accum.data[5] += data[5] * rhs.data[5];
// k=2
accum.data[0] += data[2] * rhs.data[6];
accum.data[1] += data[2] * rhs.data[7];
accum.data[2] += data[2] * rhs.data[8];
accum.data[3] += data[6] * rhs.data[6];
accum.data[4] += data[6] * rhs.data[7];
accum.data[5] += data[6] * rhs.data[8];
// k=3
accum.data[0] += data[3] * rhs.data[9];
accum.data[1] += data[3] * rhs.data[10];
accum.data[2] += data[3] * rhs.data[11];
accum.data[3] += data[7] * rhs.data[9];
accum.data[4] += data[7] * rhs.data[10];
accum.data[5] += data[7] * rhs.data[11];
return accum;
}
/// Matrix product of size 2-by-3-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> operator*(Matrix<Element, 4, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> product(
Matrix<Element, 4, 4> const &rhs,
Matrix<Element, 2, 4> accum = Matrix<Element, 2, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[4] * rhs.data[0];
accum.data[5] += data[4] * rhs.data[1];
accum.data[6] += data[4] * rhs.data[2];
accum.data[7] += data[4] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
accum.data[4] += data[5] * rhs.data[4];
accum.data[5] += data[5] * rhs.data[5];
accum.data[6] += data[5] * rhs.data[6];
accum.data[7] += data[5] * rhs.data[7];
// k=2
accum.data[0] += data[2] * rhs.data[8];
accum.data[1] += data[2] * rhs.data[9];
accum.data[2] += data[2] * rhs.data[10];
accum.data[3] += data[2] * rhs.data[11];
accum.data[4] += data[6] * rhs.data[8];
accum.data[5] += data[6] * rhs.data[9];
accum.data[6] += data[6] * rhs.data[10];
accum.data[7] += data[6] * rhs.data[11];
// k=3
accum.data[0] += data[3] * rhs.data[12];
accum.data[1] += data[3] * rhs.data[13];
accum.data[2] += data[3] * rhs.data[14];
accum.data[3] += data[3] * rhs.data[15];
accum.data[4] += data[7] * rhs.data[12];
accum.data[5] += data[7] * rhs.data[13];
accum.data[6] += data[7] * rhs.data[14];
accum.data[7] += data[7] * rhs.data[15];
return accum;
}
/// Matrix product of size 2-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> operator*(Matrix<Element, 4, 4> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 2-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 4, 4> const &rhs) {
*this = product(rhs);
return *this;
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
accum += data[3];
accum += data[4];
accum += data[5];
accum += data[6];
accum += data[7];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
accum += data[3] * data[3];
accum += data[4] * data[4];
accum += data[5] * data[5];
accum += data[6] * data[6];
accum += data[7] * data[7];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
accum += data[5];
return accum;
}
};
/// Template alias for 2-by-4 matrix
template <typename Element>
using Matrix2x4 = Matrix<Element, 2, 4>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix2x4<Element> make_Matrix2x4(
Element _0_0, Element _0_1, Element _0_2, Element _0_3,
Element _1_0, Element _1_1, Element _1_2, Element _1_3
) {
return Matrix2x4<Element>(
_0_0, _0_1, _0_2, _0_3,
_1_0, _1_1, _1_2, _1_3
);
}
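//
// Usage sketch (illustrative editor's addition, not part of the original header). It
// assumes these templates live in the cutlass namespace and that the 4-by-1 and 2-by-1
// specializations follow the same generated pattern (element-wise constructor, sum(), etc.)
// as the specializations defined in this file.
//
//   cutlass::Matrix2x4<float> A = cutlass::make_Matrix2x4(
//       1.0f, 2.0f, 3.0f, 4.0f,
//       5.0f, 6.0f, 7.0f, 8.0f);
//   cutlass::Matrix<float, 4, 1> x(1.0f, 0.0f, 0.0f, 1.0f);
//   cutlass::Matrix<float, 2, 1> y = A * x;   // matrix product of size 2-by-1-by-4: y = (5, 13)
//   float s = A.sum();                        // sum of all eight elements: 36
//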
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 3-by-1 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 3, 1> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 3;
/// Number of columns in matrix
static int const kColumns = 1;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 3;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 3-by-1 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
/// Constructs a 3-by-1 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0,
Element _1_0,
Element _2_0
) {
data[0] = _0_0;
data[1] = _1_0;
data[2] = _2_0;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
return m;
}
/// Constructs a matrix filled with ones
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix filled with zeros
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> transpose() const {
Matrix<Element, 1, 3> mt;
mt.data[0] = data[0];
mt.data[1] = data[1];
mt.data[2] = data[2];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 1 + j + 0];
m.data[1] = data[i * 1 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 1 + j + 0] = m.data[0];
data[i * 1 + j + 1] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const {
Matrix<Element, 3, 1> m;
m.data[0] = data[i * 1 + j + 0];
m.data[1] = data[i * 1 + j + 1];
m.data[2] = data[i * 1 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) {
data[i * 1 + j + 0] = m.data[0];
data[i * 1 + j + 1] = m.data[1];
data[i * 1 + j + 2] = m.data[2];
return *this;
}
/// Gets the j-th column as a 3-by-1 column vector
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> column(int j) const {
return slice_3x1(0, j);
}
/// Overwrites the j-th column with a 3-by-1 column vector
CUTLASS_HOST_DEVICE
Matrix &set_column(Matrix<Element, 3, 1> const &v, int j = 0) {
return set_slice_3x1(v, 0, j);
}
/// Concatenates this matrix with a 3-by-1 matrix to form a 3-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> hcat(Matrix<Element, 3, 1> const & rhs) const {
return Matrix<Element, 3, 2>::hcat(*this, rhs);
}
/// Concatenates this matrix with a 3-by-2 matrix to form a 3-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> hcat(Matrix<Element, 3, 2> const & rhs) const {
return Matrix<Element, 3, 3>::hcat(*this, rhs);
}
/// Concatenates this matrix with a 3-by-3 matrix to form a 3-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> hcat(Matrix<Element, 3, 3> const & rhs) const {
return Matrix<Element, 3, 4>::hcat(*this, rhs);
}
/// Forms a 3-by-1 matrix by vertically concatenating an Element with a 2-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Element upper, Matrix<Element, 2, 1> const & lower) {
return Matrix(
upper
, lower.at(0, 0)
, lower.at(1, 0));
}
/// Forms a 3-by-1 matrix by vertically concatenating a 2-by-1 matrix with an Element
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 2, 1> const & upper, Element lower) {
return Matrix(
upper.at(0, 0)
, upper.at(1, 0)
, lower);
}
/// Concatenates this matrix with an Element to form a 4-by-1 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> vcat(Element rhs) const {
return Matrix<Element, 4, 1>::vcat(*this, rhs);
}
/// Elementwise add operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
return result;
}
/// Elementwise add operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
return *this;
}
/// Elementwise subtract operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
return result;
}
/// Elementwise subtract operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
return *this;
}
/// Elementwise multiply operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
return result;
}
/// Scalar multiply operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
return result;
}
/// Scalar multiply operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
return *this;
}
/// Elementwise divide operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
return result;
}
/// Scalar divide operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
return result;
}
/// Scalar divide operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
return *this;
}
/// Elementwise divide operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (3-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
m.data[0] = -data[0];
m.data[1] = -data[1];
m.data[2] = -data[2];
return m;
}
/// Matrix product of size 3-by-1-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> product(
Matrix<Element, 1, 1> const &rhs,
Matrix<Element, 3, 1> accum = Matrix<Element, 3, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[1] * rhs.data[0];
accum.data[2] += data[2] * rhs.data[0];
return accum;
}
/// Matrix product of size 3-by-1-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> operator*(Matrix<Element, 1, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-1-by-1
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 1, 1> const &rhs) {
*this = product(rhs);
return *this;
}
/// Matrix product of size 3-by-2-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> product(
Matrix<Element, 1, 2> const &rhs,
Matrix<Element, 3, 2> accum = Matrix<Element, 3, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[1] * rhs.data[0];
accum.data[3] += data[1] * rhs.data[1];
accum.data[4] += data[2] * rhs.data[0];
accum.data[5] += data[2] * rhs.data[1];
return accum;
}
/// Matrix product of size 3-by-2-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> operator*(Matrix<Element, 1, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-3-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> product(
Matrix<Element, 1, 3> const &rhs,
Matrix<Element, 3, 3> accum = Matrix<Element, 3, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[1] * rhs.data[0];
accum.data[4] += data[1] * rhs.data[1];
accum.data[5] += data[1] * rhs.data[2];
accum.data[6] += data[2] * rhs.data[0];
accum.data[7] += data[2] * rhs.data[1];
accum.data[8] += data[2] * rhs.data[2];
return accum;
}
/// Matrix product of size 3-by-3-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> operator*(Matrix<Element, 1, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-4-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> product(
Matrix<Element, 1, 4> const &rhs,
Matrix<Element, 3, 4> accum = Matrix<Element, 3, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[1] * rhs.data[0];
accum.data[5] += data[1] * rhs.data[1];
accum.data[6] += data[1] * rhs.data[2];
accum.data[7] += data[1] * rhs.data[3];
accum.data[8] += data[2] * rhs.data[0];
accum.data[9] += data[2] * rhs.data[1];
accum.data[10] += data[2] * rhs.data[2];
accum.data[11] += data[2] * rhs.data[3];
return accum;
}
/// Matrix product of size 3-by-4-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> operator*(Matrix<Element, 1, 4> const &rhs) const {
return product(rhs);
}
/// Dot product of vectors with extent 3
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 3, 1> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
accum += data[2] * rhs.data[2];
return accum;
}
/// Dot product of vectors with extent 3
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 1, 3> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
accum += data[2] * rhs.data[2];
return accum;
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
return accum;
}
/// Cross product
CUTLASS_HOST_DEVICE
Matrix cross(Matrix const &rhs) const {
return Matrix(
data[1] * rhs.data[2] - data[2] * rhs.data[1],
data[2] * rhs.data[0] - data[0] * rhs.data[2],
data[0] * rhs.data[1] - data[1] * rhs.data[0]
);
}
};
/// Template alias for 3-by-1 matrix
template <typename Element>
using Matrix3x1 = Matrix<Element, 3, 1>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix3x1<Element> make_Matrix3x1(
Element _0_0,
Element _1_0,
Element _2_0
) {
return Matrix3x1<Element>(
_0_0,
_1_0,
_2_0
);
}
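//
// Usage sketch (illustrative editor's addition, not part of the original header),
// assuming the enclosing cutlass namespace: dot() and cross() above treat a 3-by-1
// matrix as a 3-vector.
//
//   cutlass::Matrix3x1<float> a = cutlass::make_Matrix3x1(1.0f, 0.0f, 0.0f);
//   cutlass::Matrix3x1<float> b = cutlass::make_Matrix3x1(0.0f, 1.0f, 0.0f);
//   float d = a.dot(b);                        // 0
//   cutlass::Matrix3x1<float> c = a.cross(b);  // (0, 0, 1), i.e. the +z axis
//   float len = c.magnitude();                 // 1
//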
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 3-by-2 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 3, 2> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 3;
/// Number of columns in matrix
static int const kColumns = 2;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 6;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 3-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
/// Constructs a 3-by-2 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1,
Element _1_0, Element _1_1,
Element _2_0, Element _2_1
) {
data[0] = _0_0; data[1] = _0_1;
data[2] = _1_0; data[3] = _1_1;
data[4] = _2_0; data[5] = _2_1;
}
/// Constructs a 3-by-2 matrix from row vectors
CUTLASS_HOST_DEVICE
Matrix(
Matrix<Element, 1, 2> const &row_0,
Matrix<Element, 1, 2> const &row_1,
Matrix<Element, 1, 2> const &row_2
) {
data[0] = row_0.data[0];
data[1] = row_0.data[1];
data[2] = row_1.data[0];
data[3] = row_1.data[1];
data[4] = row_2.data[0];
data[5] = row_2.data[1];
}
/// Static method to construct a 3-by-2 matrix from column vectors
CUTLASS_HOST_DEVICE
static Matrix from_columns(
Matrix<Element, 3, 1> const &column_0,
Matrix<Element, 3, 1> const &column_1
) {
Matrix result;
result.data[0] = column_0.data[0];
result.data[1] = column_1.data[0];
result.data[2] = column_0.data[1];
result.data[3] = column_1.data[1];
result.data[4] = column_0.data[2];
result.data[5] = column_1.data[2];
return result;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
m.data[3] = s;
m.data[4] = s;
m.data[5] = s;
return m;
}
/// Constructs a matrix filled with ones
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix filled with zeros
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 2, 1> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[3] = diag.data[1];
return m;
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 1, 2> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[3] = diag.data[1];
return m;
}
/// Gets an array of diagonal elements
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> diagonal() const {
Matrix<Element, 2, 1> diag;
diag.data[0] = data[0];
diag.data[1] = data[3];
return diag;
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> transpose() const {
Matrix<Element, 2, 3> mt;
mt.data[0] = data[0];
mt.data[3] = data[1];
mt.data[1] = data[2];
mt.data[4] = data[3];
mt.data[2] = data[4];
mt.data[5] = data[5];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 2 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 2 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 1] = m.data[1];
return *this;
}
/// Gets the i-th row as a 1-by-2 row vector
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> row(int i) const {
return slice_1x2(i, 0);
}
/// Overwrites the i-th row with a 1-by-2 row vector
CUTLASS_HOST_DEVICE
Matrix &set_row(Matrix<Element, 1, 2> const &v, int i = 0) {
return set_slice_1x2(v, i, 0);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 2] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const {
Matrix<Element, 2, 2> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 1];
m.data[2] = data[i * 2 + j + 2];
m.data[3] = data[i * 2 + j + 3];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 1] = m.data[1];
data[i * 2 + j + 2] = m.data[2];
data[i * 2 + j + 3] = m.data[3];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const {
Matrix<Element, 3, 1> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 2];
m.data[2] = data[i * 2 + j + 4];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 2] = m.data[1];
data[i * 2 + j + 4] = m.data[2];
return *this;
}
/// Gets the j-th column as a 3-by-1 column vector
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> column(int j) const {
return slice_3x1(0, j);
}
/// Overwrites the j-th column with a 3-by-1 column vector
CUTLASS_HOST_DEVICE
Matrix &set_column(Matrix<Element, 3, 1> const &v, int j = 0) {
return set_slice_3x1(v, 0, j);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> slice_3x2(int i = 0, int j = 0) const {
Matrix<Element, 3, 2> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 1];
m.data[2] = data[i * 2 + j + 2];
m.data[3] = data[i * 2 + j + 3];
m.data[4] = data[i * 2 + j + 4];
m.data[5] = data[i * 2 + j + 5];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x2(Matrix<Element, 3, 2> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 1] = m.data[1];
data[i * 2 + j + 2] = m.data[2];
data[i * 2 + j + 3] = m.data[3];
data[i * 2 + j + 4] = m.data[4];
data[i * 2 + j + 5] = m.data[5];
return *this;
}
/// Forms a 3-by-2 matrix by horizontally concatenating a 3-by-1 matrix with a 3-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 3, 1> const & lhs, Matrix<Element, 3, 1> const & rhs) {
return Matrix(
lhs.at(0, 0), rhs.at(0, 0)
, lhs.at(1, 0), rhs.at(1, 0)
, lhs.at(2, 0), rhs.at(2, 0));
}
/// Concatenates this matrix with a 3-by-1 matrix to form a 3-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> hcat(Matrix<Element, 3, 1> const & rhs) const {
return Matrix<Element, 3, 3>::hcat(*this, rhs);
}
/// Concatenates this matrix with a 3-by-2 matrix to form a 3-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> hcat(Matrix<Element, 3, 2> const & rhs) const {
return Matrix<Element, 3, 4>::hcat(*this, rhs);
}
/// Forms a 3-by-2 matrix by vertically concatenating a 1-by-2 matrix with a 2-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 1, 2> const & upper, Matrix<Element, 2, 2> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1)
, lower.at(0, 0), lower.at(0, 1)
, lower.at(1, 0), lower.at(1, 1));
}
/// Forms a 3-by-2 matrix by vertically concatenating a 2-by-2 matrix with a 1-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 2, 2> const & upper, Matrix<Element, 1, 2> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1)
, upper.at(1, 0), upper.at(1, 1)
, lower.at(0, 0), lower.at(0, 1));
}
/// Concatenates this matrix with a 1-by-2 matrix to form a 4-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> vcat(Matrix<Element, 1, 2> const & rhs) const {
return Matrix<Element, 4, 2>::vcat(*this, rhs);
}
/// Forms a 3-by-2 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Element A, Element B,
Matrix<Element, 2, 1> const & C, Matrix<Element, 2, 1> const & D) {
return Matrix(
A, B
, C.at(0, 0), D.at(0, 0)
, C.at(1, 0), D.at(1, 0)
);
}
/// Forms a 3-by-2 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 1> const & A, Matrix<Element, 2, 1> const & B,
Element C, Element D) {
return Matrix(
A.at(0, 0), B.at(0, 0)
, A.at(1, 0), B.at(1, 0)
, C, D
);
}
/// Elementwise add operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
result.data[3] = data[3] + rhs.data[3];
result.data[4] = data[4] + rhs.data[4];
result.data[5] = data[5] + rhs.data[5];
return result;
}
/// Elementwise add operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
data[3] += rhs.data[3];
data[4] += rhs.data[4];
data[5] += rhs.data[5];
return *this;
}
/// Elementwise subtract operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
result.data[3] = data[3] - rhs.data[3];
result.data[4] = data[4] - rhs.data[4];
result.data[5] = data[5] - rhs.data[5];
return result;
}
/// Elementwise subtract operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
data[3] -= rhs.data[3];
data[4] -= rhs.data[4];
data[5] -= rhs.data[5];
return *this;
}
/// Elementwise multiply operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
result.data[3] = data[3] * rhs.data[3];
result.data[4] = data[4] * rhs.data[4];
result.data[5] = data[5] * rhs.data[5];
return result;
}
/// Scalar multiply operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
result.data[3] = data[3] * s;
result.data[4] = data[4] * s;
result.data[5] = data[5] * s;
return result;
}
/// Scalar multiply operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
data[3] *= s;
data[4] *= s;
data[5] *= s;
return *this;
}
/// Elementwise divide operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
result.data[3] = data[3] / rhs.data[3];
result.data[4] = data[4] / rhs.data[4];
result.data[5] = data[5] / rhs.data[5];
return result;
}
/// Scalar divide operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
result.data[3] = data[3] / s;
result.data[4] = data[4] / s;
result.data[5] = data[5] / s;
return result;
}
/// Scalar divide operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
data[3] /= s;
data[4] /= s;
data[5] /= s;
return *this;
}
/// Elementwise divide operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (3-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
data[3] /= rhs.data[3];
data[4] /= rhs.data[4];
data[5] /= rhs.data[5];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
m.data[0] = -data[0];
m.data[1] = -data[1];
m.data[2] = -data[2];
m.data[3] = -data[3];
m.data[4] = -data[4];
m.data[5] = -data[5];
return m;
}
/// Matrix product of size 3-by-1-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> product(
Matrix<Element, 2, 1> const &rhs,
Matrix<Element, 3, 1> accum = Matrix<Element, 3, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[2] * rhs.data[0];
accum.data[2] += data[4] * rhs.data[0];
// k=1
accum.data[0] += data[1] * rhs.data[1];
accum.data[1] += data[3] * rhs.data[1];
accum.data[2] += data[5] * rhs.data[1];
return accum;
}
/// Matrix product of size 3-by-1-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> operator*(Matrix<Element, 2, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> product(
Matrix<Element, 2, 2> const &rhs,
Matrix<Element, 3, 2> accum = Matrix<Element, 3, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[2] * rhs.data[0];
accum.data[3] += data[2] * rhs.data[1];
accum.data[4] += data[4] * rhs.data[0];
accum.data[5] += data[4] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
accum.data[2] += data[3] * rhs.data[2];
accum.data[3] += data[3] * rhs.data[3];
accum.data[4] += data[5] * rhs.data[2];
accum.data[5] += data[5] * rhs.data[3];
return accum;
}
/// Matrix product of size 3-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> operator*(Matrix<Element, 2, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 2, 2> const &rhs) {
*this = product(rhs);
return *this;
}
/// Matrix product of size 3-by-3-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> product(
Matrix<Element, 2, 3> const &rhs,
Matrix<Element, 3, 3> accum = Matrix<Element, 3, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[2] * rhs.data[0];
accum.data[4] += data[2] * rhs.data[1];
accum.data[5] += data[2] * rhs.data[2];
accum.data[6] += data[4] * rhs.data[0];
accum.data[7] += data[4] * rhs.data[1];
accum.data[8] += data[4] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
accum.data[3] += data[3] * rhs.data[3];
accum.data[4] += data[3] * rhs.data[4];
accum.data[5] += data[3] * rhs.data[5];
accum.data[6] += data[5] * rhs.data[3];
accum.data[7] += data[5] * rhs.data[4];
accum.data[8] += data[5] * rhs.data[5];
return accum;
}
/// Matrix product of size 3-by-3-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> operator*(Matrix<Element, 2, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-4-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> product(
Matrix<Element, 2, 4> const &rhs,
Matrix<Element, 3, 4> accum = Matrix<Element, 3, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[2] * rhs.data[0];
accum.data[5] += data[2] * rhs.data[1];
accum.data[6] += data[2] * rhs.data[2];
accum.data[7] += data[2] * rhs.data[3];
accum.data[8] += data[4] * rhs.data[0];
accum.data[9] += data[4] * rhs.data[1];
accum.data[10] += data[4] * rhs.data[2];
accum.data[11] += data[4] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
accum.data[4] += data[3] * rhs.data[4];
accum.data[5] += data[3] * rhs.data[5];
accum.data[6] += data[3] * rhs.data[6];
accum.data[7] += data[3] * rhs.data[7];
accum.data[8] += data[5] * rhs.data[4];
accum.data[9] += data[5] * rhs.data[5];
accum.data[10] += data[5] * rhs.data[6];
accum.data[11] += data[5] * rhs.data[7];
return accum;
}
/// Matrix product of size 3-by-4-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> operator*(Matrix<Element, 2, 4> const &rhs) const {
return product(rhs);
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
accum += data[3];
accum += data[4];
accum += data[5];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
accum += data[3] * data[3];
accum += data[4] * data[4];
accum += data[5] * data[5];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
accum += data[3];
return accum;
}
};
/// Template alias for 3-by-2 matrix
template <typename Element>
using Matrix3x2 = Matrix<Element, 3, 2>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix3x2<Element> make_Matrix3x2(
Element _0_0, Element _0_1,
Element _1_0, Element _1_1,
Element _2_0, Element _2_1
) {
return Matrix3x2<Element>(
_0_0, _0_1,
_1_0, _1_1,
_2_0, _2_1
);
}
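//
// Usage sketch (illustrative editor's addition, not part of the original header),
// assuming the enclosing cutlass namespace and that the 2-by-1 specialization provides
// the same element-wise constructor pattern as the specializations defined here: a
// 3-by-2 matrix times a 2-by-1 vector yields a 3-by-1 vector, and transpose() swaps
// the extents to 2-by-3.
//
//   cutlass::Matrix3x2<float> M = cutlass::make_Matrix3x2(
//       1.0f, 0.0f,
//       0.0f, 1.0f,
//       1.0f, 1.0f);
//   cutlass::Matrix<float, 2, 1> v(2.0f, 3.0f);
//   cutlass::Matrix3x1<float> w = M * v;              // (2, 3, 5)
//   cutlass::Matrix<float, 2, 3> Mt = M.transpose();  // 2-by-3
//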
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 3-by-3 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 3, 3> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 3;
/// Number of columns in matrix
static int const kColumns = 3;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 9;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 3-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
/// Constructs a 3-by-3 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1, Element _0_2,
Element _1_0, Element _1_1, Element _1_2,
Element _2_0, Element _2_1, Element _2_2
) {
data[0] = _0_0; data[1] = _0_1; data[2] = _0_2;
data[3] = _1_0; data[4] = _1_1; data[5] = _1_2;
data[6] = _2_0; data[7] = _2_1; data[8] = _2_2;
}
/// Constructs a 3-by-3 matrix from row vectors
CUTLASS_HOST_DEVICE
Matrix(
Matrix<Element, 1, 3> const &row_0,
Matrix<Element, 1, 3> const &row_1,
Matrix<Element, 1, 3> const &row_2
) {
data[0] = row_0.data[0];
data[1] = row_0.data[1];
data[2] = row_0.data[2];
data[3] = row_1.data[0];
data[4] = row_1.data[1];
data[5] = row_1.data[2];
data[6] = row_2.data[0];
data[7] = row_2.data[1];
data[8] = row_2.data[2];
}
/// Static method to construct a 3-by-3 matrix from column vectors
CUTLASS_HOST_DEVICE
static Matrix from_columns(
Matrix<Element, 3, 1> const &column_0,
Matrix<Element, 3, 1> const &column_1,
Matrix<Element, 3, 1> const &column_2
) {
Matrix result;
result.data[0] = column_0.data[0];
result.data[1] = column_1.data[0];
result.data[2] = column_2.data[0];
result.data[3] = column_0.data[1];
result.data[4] = column_1.data[1];
result.data[5] = column_2.data[1];
result.data[6] = column_0.data[2];
result.data[7] = column_1.data[2];
result.data[8] = column_2.data[2];
return result;
}
/// Constructs an identity matrix
CUTLASS_HOST_DEVICE
static Matrix identity() {
Matrix m;
m.data[0] = Element(1);
m.data[4] = Element(1);
m.data[8] = Element(1);
return m;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
m.data[3] = s;
m.data[4] = s;
m.data[5] = s;
m.data[6] = s;
m.data[7] = s;
m.data[8] = s;
return m;
}
/// Constructs a matrix filled with ones
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix filled with zeros
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 3, 1> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[4] = diag.data[1];
m.data[8] = diag.data[2];
return m;
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 1, 3> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[4] = diag.data[1];
m.data[8] = diag.data[2];
return m;
}
/// Gets an array of diagonal elements
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> diagonal() const {
Matrix<Element, 3, 1> diag;
diag.data[0] = data[0];
diag.data[1] = data[4];
diag.data[2] = data[8];
return diag;
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> transpose() const {
Matrix<Element, 3, 3> mt;
mt.data[0] = data[0];
mt.data[3] = data[1];
mt.data[6] = data[2];
mt.data[1] = data[3];
mt.data[4] = data[4];
mt.data[7] = data[5];
mt.data[2] = data[6];
mt.data[5] = data[7];
mt.data[8] = data[8];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 3 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 3 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const {
Matrix<Element, 1, 3> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 2] = m.data[2];
return *this;
}
/// Gets a row as a 1-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> row(int i) const {
return slice_1x3(i, 0);
}
/// Overwrites a row with the given 1-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix &set_row(Matrix<Element, 1, 3> const &v, int i = 0) {
return set_slice_1x3(v, i, 0);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 3];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 3] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const {
Matrix<Element, 2, 2> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 3];
m.data[3] = data[i * 3 + j + 4];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 3] = m.data[2];
data[i * 3 + j + 4] = m.data[3];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> slice_2x3(int i = 0, int j = 0) const {
Matrix<Element, 2, 3> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 2];
m.data[3] = data[i * 3 + j + 3];
m.data[4] = data[i * 3 + j + 4];
m.data[5] = data[i * 3 + j + 5];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x3(Matrix<Element, 2, 3> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 2] = m.data[2];
data[i * 3 + j + 3] = m.data[3];
data[i * 3 + j + 4] = m.data[4];
data[i * 3 + j + 5] = m.data[5];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const {
Matrix<Element, 3, 1> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 3];
m.data[2] = data[i * 3 + j + 6];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 3] = m.data[1];
data[i * 3 + j + 6] = m.data[2];
return *this;
}
/// Gets a column as a 3-by-1 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> column(int j) const {
return slice_3x1(0, j);
}
/// Overwrites a column with the given 3-by-1 matrix
CUTLASS_HOST_DEVICE
Matrix &set_column(Matrix<Element, 3, 1> const &v, int j = 0) {
return set_slice_3x1(v, 0, j);
}
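// Usage sketch (editorial addition; `A`, `r`, `c`, and `b` are illustrative names and
// Element = float is assumed). Rows, columns, and rectangular slices copy elements
// out of or back into the row-major storage:
//
//   Matrix<float, 3, 3> A = Matrix<float, 3, 3>::identity();
//   Matrix<float, 1, 3> r = A.row(1);              // (0, 1, 0)
//   Matrix<float, 3, 1> c = A.column(2);           // (0, 0, 1)
//   A.set_column(c, 0);                            // copy column 2 into column 0
//   Matrix<float, 2, 2> b = A.slice_2x2(1, 1);     // lower-right 2-by-2 block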
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> slice_3x2(int i = 0, int j = 0) const {
Matrix<Element, 3, 2> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 3];
m.data[3] = data[i * 3 + j + 4];
m.data[4] = data[i * 3 + j + 6];
m.data[5] = data[i * 3 + j + 7];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x2(Matrix<Element, 3, 2> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 3] = m.data[2];
data[i * 3 + j + 4] = m.data[3];
data[i * 3 + j + 6] = m.data[4];
data[i * 3 + j + 7] = m.data[5];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> slice_3x3(int i = 0, int j = 0) const {
Matrix<Element, 3, 3> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 2];
m.data[3] = data[i * 3 + j + 3];
m.data[4] = data[i * 3 + j + 4];
m.data[5] = data[i * 3 + j + 5];
m.data[6] = data[i * 3 + j + 6];
m.data[7] = data[i * 3 + j + 7];
m.data[8] = data[i * 3 + j + 8];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x3(Matrix<Element, 3, 3> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 2] = m.data[2];
data[i * 3 + j + 3] = m.data[3];
data[i * 3 + j + 4] = m.data[4];
data[i * 3 + j + 5] = m.data[5];
data[i * 3 + j + 6] = m.data[6];
data[i * 3 + j + 7] = m.data[7];
data[i * 3 + j + 8] = m.data[8];
return *this;
}
/// Forms a 3-by-3 matrix by horizontally concatenating a 3-by-1 matrix with a 3-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 3, 1> const & lhs, Matrix<Element, 3, 2> const & rhs) {
return Matrix(
lhs.at(0, 0), rhs.at(0, 0), rhs.at(0, 1)
, lhs.at(1, 0), rhs.at(1, 0), rhs.at(1, 1)
, lhs.at(2, 0), rhs.at(2, 0), rhs.at(2, 1));
}
/// Forms a 3-by-3 matrix by horizontally concatenating a 3-by-2 matrix with a 3-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 3, 2> const & lhs, Matrix<Element, 3, 1> const & rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), rhs.at(0, 0)
, lhs.at(1, 0), lhs.at(1, 1), rhs.at(1, 0)
, lhs.at(2, 0), lhs.at(2, 1), rhs.at(2, 0));
}
/// Concatenates this matrix with a 3-by-1 matrix to form a 3-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> hcat(Matrix<Element, 3, 1> const & rhs) const {
return Matrix<Element, 3, 4>::hcat(*this, rhs);
}
/// Forms a 3-by-3 matrix by vertically concatenating a 1-by-3 matrix with a 2-by-3 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 1, 3> const & upper, Matrix<Element, 2, 3> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2)
, lower.at(1, 0), lower.at(1, 1), lower.at(1, 2));
}
/// Forms a 3-by-3 matrix by vertically concatenating a 2-by-3 matrix with a 1-by-3 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 2, 3> const & upper, Matrix<Element, 1, 3> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2)
, upper.at(1, 0), upper.at(1, 1), upper.at(1, 2)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2));
}
/// Concatenates this matrix with a 1-by-3 matrix to form a 4-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> vcat(Matrix<Element, 1, 3> const & rhs) const {
return Matrix<Element, 4, 3>::vcat(*this, rhs);
}
/// Forms a 3-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Element A, Matrix<Element, 1, 2> const & B,
Matrix<Element, 2, 1> const & C, Matrix<Element, 2, 2> const & D) {
return Matrix(
A, B.at(0, 0), B.at(0, 1)
, C.at(0, 0), D.at(0, 0), D.at(0, 1)
, C.at(1, 0), D.at(1, 0), D.at(1, 1)
);
}
/// Forms a 3-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 1, 2> const & A, Element B,
Matrix<Element, 2, 2> const & C, Matrix<Element, 2, 1> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B
, C.at(0, 0), C.at(0, 1), D.at(0, 0)
, C.at(1, 0), C.at(1, 1), D.at(1, 0)
);
}
/// Forms a 3-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 1> const & A, Matrix<Element, 2, 2> const & B,
Element C, Matrix<Element, 1, 2> const & D) {
return Matrix(
A.at(0, 0), B.at(0, 0), B.at(0, 1)
, A.at(1, 0), B.at(1, 0), B.at(1, 1)
, C, D.at(0, 0), D.at(0, 1)
);
}
/// Forms a 3-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 2> const & A, Matrix<Element, 2, 1> const & B,
Matrix<Element, 1, 2> const & C, Element D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B.at(0, 0)
, A.at(1, 0), A.at(1, 1), B.at(1, 0)
, C.at(0, 0), C.at(0, 1), D
);
}
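// Usage sketch (editorial addition). The block() overloads stitch a scalar corner and
// three sub-blocks into a 3-by-3 matrix; this sketch assumes the 1-by-2, 2-by-1, and
// 2-by-2 specializations provide the same zero()/identity() factories shown in this class:
//
//   Matrix<float, 1, 2> B = Matrix<float, 1, 2>::zero();
//   Matrix<float, 2, 1> C = Matrix<float, 2, 1>::zero();
//   Matrix<float, 2, 2> D = Matrix<float, 2, 2>::identity();
//   Matrix<float, 3, 3> M = Matrix<float, 3, 3>::block(1.0f, B, C, D);  // == identity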
/// Elementwise add operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
result.data[3] = data[3] + rhs.data[3];
result.data[4] = data[4] + rhs.data[4];
result.data[5] = data[5] + rhs.data[5];
result.data[6] = data[6] + rhs.data[6];
result.data[7] = data[7] + rhs.data[7];
result.data[8] = data[8] + rhs.data[8];
return result;
}
/// Elementwise add operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
data[3] += rhs.data[3];
data[4] += rhs.data[4];
data[5] += rhs.data[5];
data[6] += rhs.data[6];
data[7] += rhs.data[7];
data[8] += rhs.data[8];
return *this;
}
/// Elementwise subtract operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
result.data[3] = data[3] - rhs.data[3];
result.data[4] = data[4] - rhs.data[4];
result.data[5] = data[5] - rhs.data[5];
result.data[6] = data[6] - rhs.data[6];
result.data[7] = data[7] - rhs.data[7];
result.data[8] = data[8] - rhs.data[8];
return result;
}
/// Elementwise subtract operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
data[3] -= rhs.data[3];
data[4] -= rhs.data[4];
data[5] -= rhs.data[5];
data[6] -= rhs.data[6];
data[7] -= rhs.data[7];
data[8] -= rhs.data[8];
return *this;
}
/// Elementwise multiply operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
result.data[3] = data[3] * rhs.data[3];
result.data[4] = data[4] * rhs.data[4];
result.data[5] = data[5] * rhs.data[5];
result.data[6] = data[6] * rhs.data[6];
result.data[7] = data[7] * rhs.data[7];
result.data[8] = data[8] * rhs.data[8];
return result;
}
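// Note (editorial): multiply(Matrix const &) above is the elementwise (Hadamard)
// product; the true matrix product is provided by product() / operator* further below.
// A minimal sketch, assuming Element = float:
//
//   Matrix<float, 3, 3> A = Matrix<float, 3, 3>::ones();
//   Matrix<float, 3, 3> H = A.multiply(A);   // elementwise: every entry is 1
//   Matrix<float, 3, 3> P = A * A;           // matrix product: every entry is 3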
/// Scalar multiply operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
result.data[3] = data[3] * s;
result.data[4] = data[4] * s;
result.data[5] = data[5] * s;
result.data[6] = data[6] * s;
result.data[7] = data[7] * s;
result.data[8] = data[8] * s;
return result;
}
/// Scalar multiply operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
data[3] *= s;
data[4] *= s;
data[5] *= s;
data[6] *= s;
data[7] *= s;
data[8] *= s;
return *this;
}
/// Elementwise divide operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
result.data[3] = data[3] / rhs.data[3];
result.data[4] = data[4] / rhs.data[4];
result.data[5] = data[5] / rhs.data[5];
result.data[6] = data[6] / rhs.data[6];
result.data[7] = data[7] / rhs.data[7];
result.data[8] = data[8] / rhs.data[8];
return result;
}
/// Scalar divide operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
result.data[3] = data[3] / s;
result.data[4] = data[4] / s;
result.data[5] = data[5] / s;
result.data[6] = data[6] / s;
result.data[7] = data[7] / s;
result.data[8] = data[8] / s;
return result;
}
/// Scalar divide operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
data[3] /= s;
data[4] /= s;
data[5] /= s;
data[6] /= s;
data[7] /= s;
data[8] /= s;
return *this;
}
/// Elementwise divide operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (3-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
data[3] /= rhs.data[3];
data[4] /= rhs.data[4];
data[5] /= rhs.data[5];
data[6] /= rhs.data[6];
data[7] /= rhs.data[7];
data[8] /= rhs.data[8];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
m.data[0] = -data[0];
m.data[1] = -data[1];
m.data[2] = -data[2];
m.data[3] = -data[3];
m.data[4] = -data[4];
m.data[5] = -data[5];
m.data[6] = -data[6];
m.data[7] = -data[7];
m.data[8] = -data[8];
return m;
}
/// Matrix product of size 3-by-1-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> product(
Matrix<Element, 3, 1> const &rhs,
Matrix<Element, 3, 1> accum = Matrix<Element, 3, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[3] * rhs.data[0];
accum.data[2] += data[6] * rhs.data[0];
// k=1
accum.data[0] += data[1] * rhs.data[1];
accum.data[1] += data[4] * rhs.data[1];
accum.data[2] += data[7] * rhs.data[1];
// k=2
accum.data[0] += data[2] * rhs.data[2];
accum.data[1] += data[5] * rhs.data[2];
accum.data[2] += data[8] * rhs.data[2];
return accum;
}
/// Matrix product of size 3-by-1-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> operator*(Matrix<Element, 3, 1> const &rhs) const {
return product(rhs);
}
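// Usage sketch (editorial addition; assumes the 3-by-1 specialization provides the same
// ones() factory shown here). product() accumulates into its second argument, while
// operator* starts from a zero accumulator:
//
//   Matrix<float, 3, 3> A = Matrix<float, 3, 3>::identity();
//   Matrix<float, 3, 1> v = Matrix<float, 3, 1>::ones();
//   Matrix<float, 3, 1> w = A * v;          // w == v
//   w = A.product(v, w);                    // w == v + A * v == 2 * v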
/// Matrix product of size 3-by-2-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> product(
Matrix<Element, 3, 2> const &rhs,
Matrix<Element, 3, 2> accum = Matrix<Element, 3, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[3] * rhs.data[0];
accum.data[3] += data[3] * rhs.data[1];
accum.data[4] += data[6] * rhs.data[0];
accum.data[5] += data[6] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
accum.data[2] += data[4] * rhs.data[2];
accum.data[3] += data[4] * rhs.data[3];
accum.data[4] += data[7] * rhs.data[2];
accum.data[5] += data[7] * rhs.data[3];
// k=2
accum.data[0] += data[2] * rhs.data[4];
accum.data[1] += data[2] * rhs.data[5];
accum.data[2] += data[5] * rhs.data[4];
accum.data[3] += data[5] * rhs.data[5];
accum.data[4] += data[8] * rhs.data[4];
accum.data[5] += data[8] * rhs.data[5];
return accum;
}
/// Matrix product of size 3-by-2-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> operator*(Matrix<Element, 3, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> product(
Matrix<Element, 3, 3> const &rhs,
Matrix<Element, 3, 3> accum = Matrix<Element, 3, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[3] * rhs.data[0];
accum.data[4] += data[3] * rhs.data[1];
accum.data[5] += data[3] * rhs.data[2];
accum.data[6] += data[6] * rhs.data[0];
accum.data[7] += data[6] * rhs.data[1];
accum.data[8] += data[6] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
accum.data[3] += data[4] * rhs.data[3];
accum.data[4] += data[4] * rhs.data[4];
accum.data[5] += data[4] * rhs.data[5];
accum.data[6] += data[7] * rhs.data[3];
accum.data[7] += data[7] * rhs.data[4];
accum.data[8] += data[7] * rhs.data[5];
// k=2
accum.data[0] += data[2] * rhs.data[6];
accum.data[1] += data[2] * rhs.data[7];
accum.data[2] += data[2] * rhs.data[8];
accum.data[3] += data[5] * rhs.data[6];
accum.data[4] += data[5] * rhs.data[7];
accum.data[5] += data[5] * rhs.data[8];
accum.data[6] += data[8] * rhs.data[6];
accum.data[7] += data[8] * rhs.data[7];
accum.data[8] += data[8] * rhs.data[8];
return accum;
}
/// Matrix product of size 3-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> operator*(Matrix<Element, 3, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 3, 3> const &rhs) {
*this = product(rhs);
return *this;
}
/// Matrix product of size 3-by-4-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> product(
Matrix<Element, 3, 4> const &rhs,
Matrix<Element, 3, 4> accum = Matrix<Element, 3, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[3] * rhs.data[0];
accum.data[5] += data[3] * rhs.data[1];
accum.data[6] += data[3] * rhs.data[2];
accum.data[7] += data[3] * rhs.data[3];
accum.data[8] += data[6] * rhs.data[0];
accum.data[9] += data[6] * rhs.data[1];
accum.data[10] += data[6] * rhs.data[2];
accum.data[11] += data[6] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
accum.data[4] += data[4] * rhs.data[4];
accum.data[5] += data[4] * rhs.data[5];
accum.data[6] += data[4] * rhs.data[6];
accum.data[7] += data[4] * rhs.data[7];
accum.data[8] += data[7] * rhs.data[4];
accum.data[9] += data[7] * rhs.data[5];
accum.data[10] += data[7] * rhs.data[6];
accum.data[11] += data[7] * rhs.data[7];
// k=2
accum.data[0] += data[2] * rhs.data[8];
accum.data[1] += data[2] * rhs.data[9];
accum.data[2] += data[2] * rhs.data[10];
accum.data[3] += data[2] * rhs.data[11];
accum.data[4] += data[5] * rhs.data[8];
accum.data[5] += data[5] * rhs.data[9];
accum.data[6] += data[5] * rhs.data[10];
accum.data[7] += data[5] * rhs.data[11];
accum.data[8] += data[8] * rhs.data[8];
accum.data[9] += data[8] * rhs.data[9];
accum.data[10] += data[8] * rhs.data[10];
accum.data[11] += data[8] * rhs.data[11];
return accum;
}
/// Matrix product of size 3-by-4-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> operator*(Matrix<Element, 3, 4> const &rhs) const {
return product(rhs);
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
accum += data[3];
accum += data[4];
accum += data[5];
accum += data[6];
accum += data[7];
accum += data[8];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
accum += data[3] * data[3];
accum += data[4] * data[4];
accum += data[5] * data[5];
accum += data[6] * data[6];
accum += data[7] * data[7];
accum += data[8] * data[8];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
accum += data[4];
accum += data[8];
return accum;
}
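// Usage sketch (editorial addition, Element = float assumed). The reductions above fold
// all nine elements (sum, norm, magnitude) or only the diagonal (trace):
//
//   Matrix<float, 3, 3> I = Matrix<float, 3, 3>::identity();
//   float s = I.sum();        // 3
//   float n = I.norm();       // 3  (sum of squared elements)
//   float m = I.magnitude();  // sqrt(3)
//   float t = I.trace();      // 3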
/// Returns 3-by-3 rotation matrix around the X axis
CUTLASS_HOST_DEVICE
static Matrix rotation_X(Element theta) {
Matrix m = identity();
Element c = fast_cos(theta);
Element s = fast_sin(theta);
m.at(1, 1) = c;
m.at(1, 2) = -s;
m.at(2, 1) = s;
m.at(2, 2) = c;
return m;
}
/// Returns 3-by-3 rotation matrix around the Y axis
CUTLASS_HOST_DEVICE
static Matrix rotation_Y(Element theta) {
Matrix m = identity();
Element c = fast_cos(theta);
Element s = fast_sin(theta);
m.at(0, 0) = c;
m.at(2, 0) = -s;
m.at(0, 2) = s;
m.at(2, 2) = c;
return m;
}
/// Returns 3-by-3 rotation matrix around the Z axis
CUTLASS_HOST_DEVICE
static Matrix rotation_Z(Element theta) {
Matrix m = Matrix::identity();
Element c = fast_cos(theta);
Element s = fast_sin(theta);
m.at(0, 0) = c;
m.at(0, 1) = -s;
m.at(1, 0) = s;
m.at(1, 1) = c;
return m;
}
/// Returns a 3-by-3 rotation matrix around a unit-length axis
CUTLASS_HOST_DEVICE
static Matrix rotation(Element theta, Matrix<Element, 3, 1> const &u) {
Element x = u.data[0];
Element y = u.data[1];
Element z = u.data[2];
Element c = fast_cos(theta);
Element s = fast_sin(theta);
Element one_minus_cos = Element(1) - c;
Matrix m;
m.set_slice_3x3({
c + x * x * one_minus_cos, x * y * one_minus_cos - z * s, x * z * one_minus_cos + y * s,
y * x * one_minus_cos + z * s, c + y * y * one_minus_cos, y * z * one_minus_cos - x * s,
z * x * one_minus_cos - y * s, z * y * one_minus_cos + x * s, c + z * z * one_minus_cos
});
return m;
}
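// Usage sketch (editorial addition; `theta` is an arbitrary angle in radians).
// rotation() applied to the unit Z axis agrees with rotation_Z() up to rounding:
//
//   float theta = 0.5f;
//   Matrix<float, 3, 3> Rz = Matrix<float, 3, 3>::rotation_Z(theta);
//   Matrix<float, 3, 1> z_axis = Matrix<float, 3, 3>::identity().column(2);  // (0, 0, 1)
//   Matrix<float, 3, 3> Ra = Matrix<float, 3, 3>::rotation(theta, z_axis);   // ~= Rz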
/// Returns a 3-by-3 reflection about the plane specified by the
/// unit-length normal vector n_unit
CUTLASS_HOST_DEVICE
static Matrix reflection(Matrix<Element, 3, 1> const &n_unit) {
Element a = n_unit.data[0];
Element b = n_unit.data[1];
Element c = n_unit.data[2];
Matrix m = Matrix::identity();
m.set_slice_3x3({
Element(1) - Element(2) * a * a, Element(-2) * a * b, Element(-2) * a * c,
Element(-2) * a * b, Element(1) - Element(2) * b * b, Element(-2) * b * c,
Element(-2) * a * c, Element(-2) * b * c, Element(1) - Element(2) * c * c
});
return m;
}
/// Computes the determinant of a 3-by-3 matrix
CUTLASS_HOST_DEVICE
Element determinant(Element accum = Element()) const {
accum += at(0, 0) * Matrix<Element, 2, 2>({ at(1, 1), at(1, 2), at(2, 1), at(2, 2) }).determinant();
accum -= at(0, 1) * Matrix<Element, 2, 2>({ at(1, 0), at(1, 2), at(2, 0), at(2, 2) }).determinant();
accum += at(0, 2) * Matrix<Element, 2, 2>({ at(1, 0), at(1, 1), at(2, 0), at(2, 1) }).determinant();
return accum;
}
/// Computes the inverse of a 3-by-3 matrix given
/// the matrix's determinant
CUTLASS_HOST_DEVICE
Matrix inverse(Element det) const {
return Matrix(
at(1, 1) * at(2, 2) - at(1, 2) * at(2, 1),
at(0, 2) * at(2, 1) - at(0, 1) * at(2, 2),
at(0, 1) * at(1, 2) - at(0, 2) * at(1, 1),
at(1, 2) * at(2, 0) - at(1, 0) * at(2, 2),
at(0, 0) * at(2, 2) - at(0, 2) * at(2, 0),
at(0, 2) * at(1, 0) - at(0, 0) * at(1, 2),
at(1, 0) * at(2, 1) - at(1, 1) * at(2, 0),
at(0, 1) * at(2, 0) - at(0, 0) * at(2, 1),
at(0, 0) * at(1, 1) - at(0, 1) * at(1, 0)
) * (Element(1) / det);
}
/// Computes the inverse of a 3-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix inverse() const {
return inverse(determinant());
}
};
/// Template alias for 3-by-3 matrix
template <typename Element>
using Matrix3x3 = Matrix<Element, 3, 3>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix3x3<Element> make_Matrix3x3(
Element _0_0, Element _0_1, Element _0_2,
Element _1_0, Element _1_1, Element _1_2,
Element _2_0, Element _2_1, Element _2_2
) {
return Matrix3x3<Element>(
_0_0, _0_1, _0_2,
_1_0, _1_1, _1_2,
_2_0, _2_1, _2_2
);
}
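// Usage sketch (editorial addition): the free function deduces Element from its
// arguments; determinant() and inverse() then follow the cofactor expansion above.
//
//   Matrix3x3<float> A = make_Matrix3x3(
//       2.0f, 0.0f, 0.0f,
//       0.0f, 3.0f, 0.0f,
//       0.0f, 0.0f, 4.0f);
//   float det = A.determinant();       // 24
//   Matrix3x3<float> B = A.inverse();  // diag(1/2, 1/3, 1/4)
//   Matrix3x3<float> I = A * B;        // approximately the identity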
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 3-by-4 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 3, 4> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 3;
/// Number of columns in matrix
static int const kColumns = 4;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 12;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 3-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
/// Constructs a 3-by-4 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1, Element _0_2, Element _0_3,
Element _1_0, Element _1_1, Element _1_2, Element _1_3,
Element _2_0, Element _2_1, Element _2_2, Element _2_3
) {
data[0] = _0_0; data[1] = _0_1; data[2] = _0_2; data[3] = _0_3;
data[4] = _1_0; data[5] = _1_1; data[6] = _1_2; data[7] = _1_3;
data[8] = _2_0; data[9] = _2_1; data[10] = _2_2; data[11] = _2_3;
}
/// Constructs a 3-by-4 matrix from row vectors
CUTLASS_HOST_DEVICE
Matrix(
Matrix<Element, 1, 4> const &row_0,
Matrix<Element, 1, 4> const &row_1,
Matrix<Element, 1, 4> const &row_2
) {
data[0] = row_0.data[0];
data[1] = row_0.data[1];
data[2] = row_0.data[2];
data[3] = row_0.data[3];
data[4] = row_1.data[0];
data[5] = row_1.data[1];
data[6] = row_1.data[2];
data[7] = row_1.data[3];
data[8] = row_2.data[0];
data[9] = row_2.data[1];
data[10] = row_2.data[2];
data[11] = row_2.data[3];
}
/// Static method to construct a 3-by-4 matrix from column vectors
CUTLASS_HOST_DEVICE
static Matrix from_columns(
Matrix<Element, 4, 1> const &column_0,
Matrix<Element, 4, 1> const &column_1,
Matrix<Element, 4, 1> const &column_2,
Matrix<Element, 4, 1> const &column_3
) {
Matrix result;
result.data[0] = column_0.data[0];
result.data[1] = column_1.data[0];
result.data[2] = column_2.data[0];
result.data[3] = column_3.data[0];
result.data[4] = column_0.data[1];
result.data[5] = column_1.data[1];
result.data[6] = column_2.data[1];
result.data[7] = column_3.data[1];
result.data[8] = column_0.data[2];
result.data[9] = column_1.data[2];
result.data[10] = column_2.data[2];
result.data[11] = column_3.data[2];
return result;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
m.data[3] = s;
m.data[4] = s;
m.data[5] = s;
m.data[6] = s;
m.data[7] = s;
m.data[8] = s;
m.data[9] = s;
m.data[10] = s;
m.data[11] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 3, 1> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[5] = diag.data[1];
m.data[10] = diag.data[2];
return m;
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 1, 3> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[5] = diag.data[1];
m.data[10] = diag.data[2];
return m;
}
/// Gets an array of diagonal elements
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> diagonal() const {
Matrix<Element, 3, 1> diag;
diag.data[0] = data[0];
diag.data[1] = data[5];
diag.data[2] = data[10];
return diag;
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> transpose() const {
Matrix<Element, 4, 3> mt;
mt.data[0] = data[0];
mt.data[3] = data[1];
mt.data[6] = data[2];
mt.data[9] = data[3];
mt.data[1] = data[4];
mt.data[4] = data[5];
mt.data[7] = data[6];
mt.data[10] = data[7];
mt.data[2] = data[8];
mt.data[5] = data[9];
mt.data[8] = data[10];
mt.data[11] = data[11];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 4 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 4 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const {
Matrix<Element, 1, 3> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> slice_1x4(int i = 0, int j = 0) const {
Matrix<Element, 1, 4> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 3];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x4(Matrix<Element, 1, 4> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 3] = m.data[3];
return *this;
}
/// Gets a row as a 1-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> row(int i) const {
return slice_1x4(i, 0);
}
/// Overwrites a row with the given 1-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix &set_row(Matrix<Element, 1, 4> const &v, int i = 0) {
return set_slice_1x4(v, i, 0);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 4];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 4] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const {
Matrix<Element, 2, 2> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 4];
m.data[3] = data[i * 4 + j + 5];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 4] = m.data[2];
data[i * 4 + j + 5] = m.data[3];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> slice_2x3(int i = 0, int j = 0) const {
Matrix<Element, 2, 3> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 4];
m.data[4] = data[i * 4 + j + 5];
m.data[5] = data[i * 4 + j + 6];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x3(Matrix<Element, 2, 3> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 4] = m.data[3];
data[i * 4 + j + 5] = m.data[4];
data[i * 4 + j + 6] = m.data[5];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> slice_2x4(int i = 0, int j = 0) const {
Matrix<Element, 2, 4> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 3];
m.data[4] = data[i * 4 + j + 4];
m.data[5] = data[i * 4 + j + 5];
m.data[6] = data[i * 4 + j + 6];
m.data[7] = data[i * 4 + j + 7];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x4(Matrix<Element, 2, 4> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 3] = m.data[3];
data[i * 4 + j + 4] = m.data[4];
data[i * 4 + j + 5] = m.data[5];
data[i * 4 + j + 6] = m.data[6];
data[i * 4 + j + 7] = m.data[7];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const {
Matrix<Element, 3, 1> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 4];
m.data[2] = data[i * 4 + j + 8];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 4] = m.data[1];
data[i * 4 + j + 8] = m.data[2];
return *this;
}
/// Gets a column as a 3-by-1 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> column(int j) const {
return slice_3x1(0, j);
}
/// Overwrites a column with the given 3-by-1 matrix
CUTLASS_HOST_DEVICE
Matrix &set_column(Matrix<Element, 3, 1> const &v, int j = 0) {
return set_slice_3x1(v, 0, j);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> slice_3x2(int i = 0, int j = 0) const {
Matrix<Element, 3, 2> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 4];
m.data[3] = data[i * 4 + j + 5];
m.data[4] = data[i * 4 + j + 8];
m.data[5] = data[i * 4 + j + 9];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x2(Matrix<Element, 3, 2> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 4] = m.data[2];
data[i * 4 + j + 5] = m.data[3];
data[i * 4 + j + 8] = m.data[4];
data[i * 4 + j + 9] = m.data[5];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> slice_3x3(int i = 0, int j = 0) const {
Matrix<Element, 3, 3> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 4];
m.data[4] = data[i * 4 + j + 5];
m.data[5] = data[i * 4 + j + 6];
m.data[6] = data[i * 4 + j + 8];
m.data[7] = data[i * 4 + j + 9];
m.data[8] = data[i * 4 + j + 10];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x3(Matrix<Element, 3, 3> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 4] = m.data[3];
data[i * 4 + j + 5] = m.data[4];
data[i * 4 + j + 6] = m.data[5];
data[i * 4 + j + 8] = m.data[6];
data[i * 4 + j + 9] = m.data[7];
data[i * 4 + j + 10] = m.data[8];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> slice_3x4(int i = 0, int j = 0) const {
Matrix<Element, 3, 4> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 3];
m.data[4] = data[i * 4 + j + 4];
m.data[5] = data[i * 4 + j + 5];
m.data[6] = data[i * 4 + j + 6];
m.data[7] = data[i * 4 + j + 7];
m.data[8] = data[i * 4 + j + 8];
m.data[9] = data[i * 4 + j + 9];
m.data[10] = data[i * 4 + j + 10];
m.data[11] = data[i * 4 + j + 11];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x4(Matrix<Element, 3, 4> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 3] = m.data[3];
data[i * 4 + j + 4] = m.data[4];
data[i * 4 + j + 5] = m.data[5];
data[i * 4 + j + 6] = m.data[6];
data[i * 4 + j + 7] = m.data[7];
data[i * 4 + j + 8] = m.data[8];
data[i * 4 + j + 9] = m.data[9];
data[i * 4 + j + 10] = m.data[10];
data[i * 4 + j + 11] = m.data[11];
return *this;
}
/// Forms a 3-by-4 matrix by horizontally concatenating a 3-by-1 matrix with a 3-by-3 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 3, 1> const & lhs, Matrix<Element, 3, 3> const & rhs) {
return Matrix(
lhs.at(0, 0), rhs.at(0, 0), rhs.at(0, 1), rhs.at(0, 2)
, lhs.at(1, 0), rhs.at(1, 0), rhs.at(1, 1), rhs.at(1, 2)
, lhs.at(2, 0), rhs.at(2, 0), rhs.at(2, 1), rhs.at(2, 2));
}
/// Forms a 3-by-4 matrix by horizontally concatenating a 3-by-2 matrix with a 3-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 3, 2> const & lhs, Matrix<Element, 3, 2> const & rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), rhs.at(0, 0), rhs.at(0, 1)
, lhs.at(1, 0), lhs.at(1, 1), rhs.at(1, 0), rhs.at(1, 1)
, lhs.at(2, 0), lhs.at(2, 1), rhs.at(2, 0), rhs.at(2, 1));
}
/// Forms a 3-by-4 matrix by horizontally concatenating a 3-by-3 matrix with a 3-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 3, 3> const & lhs, Matrix<Element, 3, 1> const & rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), lhs.at(0, 2), rhs.at(0, 0)
, lhs.at(1, 0), lhs.at(1, 1), lhs.at(1, 2), rhs.at(1, 0)
, lhs.at(2, 0), lhs.at(2, 1), lhs.at(2, 2), rhs.at(2, 0));
}
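// Usage sketch (editorial addition; assumes the 3-by-1 specialization provides the
// ones() factory seen in the other specializations). hcat() of a 3-by-3 rotation and a
// 3-by-1 translation yields the usual 3-by-4 affine transform [R | t]:
//
//   Matrix<float, 3, 3> R = Matrix<float, 3, 3>::rotation_Z(0.5f);
//   Matrix<float, 3, 1> t = Matrix<float, 3, 1>::ones();
//   Matrix<float, 3, 4> T = Matrix<float, 3, 4>::hcat(R, t);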
/// Forms a 3-by-4 matrix by vertically concatenating a 1-by-4 matrix with a 2-by-4 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 1, 4> const & upper, Matrix<Element, 2, 4> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2), upper.at(0, 3)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2), lower.at(0, 3)
, lower.at(1, 0), lower.at(1, 1), lower.at(1, 2), lower.at(1, 3));
}
/// Forms a 3-by-4 matrix by vertically concatenating a 2-by-4 matrix with a 1-by-4 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 2, 4> const & upper, Matrix<Element, 1, 4> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2), upper.at(0, 3)
, upper.at(1, 0), upper.at(1, 1), upper.at(1, 2), upper.at(1, 3)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2), lower.at(0, 3));
}
/// Concatenates this matrix with a 1-by-4 matrix to form a 4-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> vcat(Matrix<Element, 1, 4> const & rhs) const {
return Matrix<Element, 4, 4>::vcat(*this, rhs);
}
/// Forms a 3-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Element A, Matrix<Element, 1, 3> const & B,
Matrix<Element, 2, 1> const & C, Matrix<Element, 2, 3> const & D) {
return Matrix(
A, B.at(0, 0), B.at(0, 1), B.at(0, 2)
, C.at(0, 0), D.at(0, 0), D.at(0, 1), D.at(0, 2)
, C.at(1, 0), D.at(1, 0), D.at(1, 1), D.at(1, 2)
);
}
/// Forms a 3-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 1, 2> const & A, Matrix<Element, 1, 2> const & B,
Matrix<Element, 2, 2> const & C, Matrix<Element, 2, 2> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B.at(0, 0), B.at(0, 1)
, C.at(0, 0), C.at(0, 1), D.at(0, 0), D.at(0, 1)
, C.at(1, 0), C.at(1, 1), D.at(1, 0), D.at(1, 1)
);
}
/// Forms a 3-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 1, 3> const & A, Element B,
Matrix<Element, 2, 3> const & C, Matrix<Element, 2, 1> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), A.at(0, 2), B
, C.at(0, 0), C.at(0, 1), C.at(0, 2), D.at(0, 0)
, C.at(1, 0), C.at(1, 1), C.at(1, 2), D.at(1, 0)
);
}
/// Forms a 3-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 1> const & A, Matrix<Element, 2, 3> const & B,
Element C, Matrix<Element, 1, 3> const & D) {
return Matrix(
A.at(0, 0), B.at(0, 0), B.at(0, 1), B.at(0, 2)
, A.at(1, 0), B.at(1, 0), B.at(1, 1), B.at(1, 2)
, C, D.at(0, 0), D.at(0, 1), D.at(0, 2)
);
}
/// Forms a 3-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 2> const & A, Matrix<Element, 2, 2> const & B,
Matrix<Element, 1, 2> const & C, Matrix<Element, 1, 2> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B.at(0, 0), B.at(0, 1)
, A.at(1, 0), A.at(1, 1), B.at(1, 0), B.at(1, 1)
, C.at(0, 0), C.at(0, 1), D.at(0, 0), D.at(0, 1)
);
}
/// Forms a 3-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 3> const & A, Matrix<Element, 2, 1> const & B,
Matrix<Element, 1, 3> const & C, Element D) {
return Matrix(
A.at(0, 0), A.at(0, 1), A.at(0, 2), B.at(0, 0)
, A.at(1, 0), A.at(1, 1), A.at(1, 2), B.at(1, 0)
, C.at(0, 0), C.at(0, 1), C.at(0, 2), D
);
}
/// Elementwise add operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
result.data[3] = data[3] + rhs.data[3];
result.data[4] = data[4] + rhs.data[4];
result.data[5] = data[5] + rhs.data[5];
result.data[6] = data[6] + rhs.data[6];
result.data[7] = data[7] + rhs.data[7];
result.data[8] = data[8] + rhs.data[8];
result.data[9] = data[9] + rhs.data[9];
result.data[10] = data[10] + rhs.data[10];
result.data[11] = data[11] + rhs.data[11];
return result;
}
/// Elementwise add operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
data[3] += rhs.data[3];
data[4] += rhs.data[4];
data[5] += rhs.data[5];
data[6] += rhs.data[6];
data[7] += rhs.data[7];
data[8] += rhs.data[8];
data[9] += rhs.data[9];
data[10] += rhs.data[10];
data[11] += rhs.data[11];
return *this;
}
/// Elementwise subtract operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
result.data[3] = data[3] - rhs.data[3];
result.data[4] = data[4] - rhs.data[4];
result.data[5] = data[5] - rhs.data[5];
result.data[6] = data[6] - rhs.data[6];
result.data[7] = data[7] - rhs.data[7];
result.data[8] = data[8] - rhs.data[8];
result.data[9] = data[9] - rhs.data[9];
result.data[10] = data[10] - rhs.data[10];
result.data[11] = data[11] - rhs.data[11];
return result;
}
/// Elementwise subtract operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
data[3] -= rhs.data[3];
data[4] -= rhs.data[4];
data[5] -= rhs.data[5];
data[6] -= rhs.data[6];
data[7] -= rhs.data[7];
data[8] -= rhs.data[8];
data[9] -= rhs.data[9];
data[10] -= rhs.data[10];
data[11] -= rhs.data[11];
return *this;
}
/// Elementwise multiply operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
result.data[3] = data[3] * rhs.data[3];
result.data[4] = data[4] * rhs.data[4];
result.data[5] = data[5] * rhs.data[5];
result.data[6] = data[6] * rhs.data[6];
result.data[7] = data[7] * rhs.data[7];
result.data[8] = data[8] * rhs.data[8];
result.data[9] = data[9] * rhs.data[9];
result.data[10] = data[10] * rhs.data[10];
result.data[11] = data[11] * rhs.data[11];
return result;
}
/// Scalar multiply operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
result.data[3] = data[3] * s;
result.data[4] = data[4] * s;
result.data[5] = data[5] * s;
result.data[6] = data[6] * s;
result.data[7] = data[7] * s;
result.data[8] = data[8] * s;
result.data[9] = data[9] * s;
result.data[10] = data[10] * s;
result.data[11] = data[11] * s;
return result;
}
/// Scalar multiply operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
data[3] *= s;
data[4] *= s;
data[5] *= s;
data[6] *= s;
data[7] *= s;
data[8] *= s;
data[9] *= s;
data[10] *= s;
data[11] *= s;
return *this;
}
/// Elementwise divide operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
result.data[3] = data[3] / rhs.data[3];
result.data[4] = data[4] / rhs.data[4];
result.data[5] = data[5] / rhs.data[5];
result.data[6] = data[6] / rhs.data[6];
result.data[7] = data[7] / rhs.data[7];
result.data[8] = data[8] / rhs.data[8];
result.data[9] = data[9] / rhs.data[9];
result.data[10] = data[10] / rhs.data[10];
result.data[11] = data[11] / rhs.data[11];
return result;
}
/// Scalar divide operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
result.data[3] = data[3] / s;
result.data[4] = data[4] / s;
result.data[5] = data[5] / s;
result.data[6] = data[6] / s;
result.data[7] = data[7] / s;
result.data[8] = data[8] / s;
result.data[9] = data[9] / s;
result.data[10] = data[10] / s;
result.data[11] = data[11] / s;
return result;
}
/// Scalar divide operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
data[3] /= s;
data[4] /= s;
data[5] /= s;
data[6] /= s;
data[7] /= s;
data[8] /= s;
data[9] /= s;
data[10] /= s;
data[11] /= s;
return *this;
}
/// Elementwise divide operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (3-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
data[3] /= rhs.data[3];
data[4] /= rhs.data[4];
data[5] /= rhs.data[5];
data[6] /= rhs.data[6];
data[7] /= rhs.data[7];
data[8] /= rhs.data[8];
data[9] /= rhs.data[9];
data[10] /= rhs.data[10];
data[11] /= rhs.data[11];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
m.data[0] = -data[0];
m.data[1] = -data[1];
m.data[2] = -data[2];
m.data[3] = -data[3];
m.data[4] = -data[4];
m.data[5] = -data[5];
m.data[6] = -data[6];
m.data[7] = -data[7];
m.data[8] = -data[8];
m.data[9] = -data[9];
m.data[10] = -data[10];
m.data[11] = -data[11];
return m;
}
/// Matrix product of size 3-by-1-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> product(
Matrix<Element, 4, 1> const &rhs,
Matrix<Element, 3, 1> accum = Matrix<Element, 3, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[4] * rhs.data[0];
accum.data[2] += data[8] * rhs.data[0];
// k=1
accum.data[0] += data[1] * rhs.data[1];
accum.data[1] += data[5] * rhs.data[1];
accum.data[2] += data[9] * rhs.data[1];
// k=2
accum.data[0] += data[2] * rhs.data[2];
accum.data[1] += data[6] * rhs.data[2];
accum.data[2] += data[10] * rhs.data[2];
// k=3
accum.data[0] += data[3] * rhs.data[3];
accum.data[1] += data[7] * rhs.data[3];
accum.data[2] += data[11] * rhs.data[3];
return accum;
}
/// Matrix product of size 3-by-1-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> operator*(Matrix<Element, 4, 1> const &rhs) const {
return product(rhs);
}
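// Usage sketch (editorial addition). Multiplying a 3-by-4 matrix by a homogeneous
// 4-by-1 column (the 4-by-1 constructor appears later in this header) applies an affine
// transform in one step; with T of all ones, every output component is 7:
//
//   Matrix<float, 3, 4> T = Matrix<float, 3, 4>::ones();
//   Matrix<float, 4, 1> p(1.0f, 2.0f, 3.0f, 1.0f);   // homogeneous point
//   Matrix<float, 3, 1> q = T * p;                   // (7, 7, 7)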
/// Matrix product of size 3-by-2-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> product(
Matrix<Element, 4, 2> const &rhs,
Matrix<Element, 3, 2> accum = Matrix<Element, 3, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[4] * rhs.data[0];
accum.data[3] += data[4] * rhs.data[1];
accum.data[4] += data[8] * rhs.data[0];
accum.data[5] += data[8] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
accum.data[2] += data[5] * rhs.data[2];
accum.data[3] += data[5] * rhs.data[3];
accum.data[4] += data[9] * rhs.data[2];
accum.data[5] += data[9] * rhs.data[3];
// k=2
accum.data[0] += data[2] * rhs.data[4];
accum.data[1] += data[2] * rhs.data[5];
accum.data[2] += data[6] * rhs.data[4];
accum.data[3] += data[6] * rhs.data[5];
accum.data[4] += data[10] * rhs.data[4];
accum.data[5] += data[10] * rhs.data[5];
// k=3
accum.data[0] += data[3] * rhs.data[6];
accum.data[1] += data[3] * rhs.data[7];
accum.data[2] += data[7] * rhs.data[6];
accum.data[3] += data[7] * rhs.data[7];
accum.data[4] += data[11] * rhs.data[6];
accum.data[5] += data[11] * rhs.data[7];
return accum;
}
/// Matrix product of size 3-by-2-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> operator*(Matrix<Element, 4, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-3-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> product(
Matrix<Element, 4, 3> const &rhs,
Matrix<Element, 3, 3> accum = Matrix<Element, 3, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[4] * rhs.data[0];
accum.data[4] += data[4] * rhs.data[1];
accum.data[5] += data[4] * rhs.data[2];
accum.data[6] += data[8] * rhs.data[0];
accum.data[7] += data[8] * rhs.data[1];
accum.data[8] += data[8] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
accum.data[3] += data[5] * rhs.data[3];
accum.data[4] += data[5] * rhs.data[4];
accum.data[5] += data[5] * rhs.data[5];
accum.data[6] += data[9] * rhs.data[3];
accum.data[7] += data[9] * rhs.data[4];
accum.data[8] += data[9] * rhs.data[5];
// k=2
accum.data[0] += data[2] * rhs.data[6];
accum.data[1] += data[2] * rhs.data[7];
accum.data[2] += data[2] * rhs.data[8];
accum.data[3] += data[6] * rhs.data[6];
accum.data[4] += data[6] * rhs.data[7];
accum.data[5] += data[6] * rhs.data[8];
accum.data[6] += data[10] * rhs.data[6];
accum.data[7] += data[10] * rhs.data[7];
accum.data[8] += data[10] * rhs.data[8];
// k=3
accum.data[0] += data[3] * rhs.data[9];
accum.data[1] += data[3] * rhs.data[10];
accum.data[2] += data[3] * rhs.data[11];
accum.data[3] += data[7] * rhs.data[9];
accum.data[4] += data[7] * rhs.data[10];
accum.data[5] += data[7] * rhs.data[11];
accum.data[6] += data[11] * rhs.data[9];
accum.data[7] += data[11] * rhs.data[10];
accum.data[8] += data[11] * rhs.data[11];
return accum;
}
/// Matrix product of size 3-by-3-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> operator*(Matrix<Element, 4, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> product(
Matrix<Element, 4, 4> const &rhs,
Matrix<Element, 3, 4> accum = Matrix<Element, 3, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[4] * rhs.data[0];
accum.data[5] += data[4] * rhs.data[1];
accum.data[6] += data[4] * rhs.data[2];
accum.data[7] += data[4] * rhs.data[3];
accum.data[8] += data[8] * rhs.data[0];
accum.data[9] += data[8] * rhs.data[1];
accum.data[10] += data[8] * rhs.data[2];
accum.data[11] += data[8] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
accum.data[4] += data[5] * rhs.data[4];
accum.data[5] += data[5] * rhs.data[5];
accum.data[6] += data[5] * rhs.data[6];
accum.data[7] += data[5] * rhs.data[7];
accum.data[8] += data[9] * rhs.data[4];
accum.data[9] += data[9] * rhs.data[5];
accum.data[10] += data[9] * rhs.data[6];
accum.data[11] += data[9] * rhs.data[7];
// k=2
accum.data[0] += data[2] * rhs.data[8];
accum.data[1] += data[2] * rhs.data[9];
accum.data[2] += data[2] * rhs.data[10];
accum.data[3] += data[2] * rhs.data[11];
accum.data[4] += data[6] * rhs.data[8];
accum.data[5] += data[6] * rhs.data[9];
accum.data[6] += data[6] * rhs.data[10];
accum.data[7] += data[6] * rhs.data[11];
accum.data[8] += data[10] * rhs.data[8];
accum.data[9] += data[10] * rhs.data[9];
accum.data[10] += data[10] * rhs.data[10];
accum.data[11] += data[10] * rhs.data[11];
// k=3
accum.data[0] += data[3] * rhs.data[12];
accum.data[1] += data[3] * rhs.data[13];
accum.data[2] += data[3] * rhs.data[14];
accum.data[3] += data[3] * rhs.data[15];
accum.data[4] += data[7] * rhs.data[12];
accum.data[5] += data[7] * rhs.data[13];
accum.data[6] += data[7] * rhs.data[14];
accum.data[7] += data[7] * rhs.data[15];
accum.data[8] += data[11] * rhs.data[12];
accum.data[9] += data[11] * rhs.data[13];
accum.data[10] += data[11] * rhs.data[14];
accum.data[11] += data[11] * rhs.data[15];
return accum;
}
/// Matrix product of size 3-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> operator*(Matrix<Element, 4, 4> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 3-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 4, 4> const &rhs) {
*this = product(rhs);
return *this;
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
accum += data[3];
accum += data[4];
accum += data[5];
accum += data[6];
accum += data[7];
accum += data[8];
accum += data[9];
accum += data[10];
accum += data[11];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
accum += data[3] * data[3];
accum += data[4] * data[4];
accum += data[5] * data[5];
accum += data[6] * data[6];
accum += data[7] * data[7];
accum += data[8] * data[8];
accum += data[9] * data[9];
accum += data[10] * data[10];
accum += data[11] * data[11];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
accum += data[5];
accum += data[10];
return accum;
}
};
/// Template alias for 3-by-4 matrix
template <typename Element>
using Matrix3x4 = Matrix<Element, 3, 4>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix3x4<Element> make_Matrix3x4(
Element _0_0, Element _0_1, Element _0_2, Element _0_3,
Element _1_0, Element _1_1, Element _1_2, Element _1_3,
Element _2_0, Element _2_1, Element _2_2, Element _2_3
) {
return Matrix3x4<Element>(
_0_0, _0_1, _0_2, _0_3,
_1_0, _1_1, _1_2, _1_3,
_2_0, _2_1, _2_2, _2_3
);
}
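// Usage sketch (editorial addition): a pure translation written as a 3-by-4 affine
// matrix; column(3) recovers the translation part.
//
//   Matrix3x4<float> P = make_Matrix3x4(
//       1.0f, 0.0f, 0.0f, 10.0f,
//       0.0f, 1.0f, 0.0f, 20.0f,
//       0.0f, 0.0f, 1.0f, 30.0f);
//   Matrix<float, 3, 1> t = P.column(3);   // (10, 20, 30)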
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 4-by-1 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 4, 1> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 4;
/// Number of columns in matrix
static int const kColumns = 1;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 4;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 4-by-1 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
/// Constructs a 4-by-1 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0,
Element _1_0,
Element _2_0,
Element _3_0
) {
data[0] = _0_0;
data[1] = _1_0;
data[2] = _2_0;
data[3] = _3_0;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
m.data[3] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> transpose() const {
Matrix<Element, 1, 4> mt;
mt.data[0] = data[0];
mt.data[1] = data[1];
mt.data[2] = data[2];
mt.data[3] = data[3];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 1 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 1 + j + 0];
m.data[1] = data[i * 1 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 1 + j + 0] = m.data[0];
data[i * 1 + j + 1] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const {
Matrix<Element, 3, 1> m;
m.data[0] = data[i * 1 + j + 0];
m.data[1] = data[i * 1 + j + 1];
m.data[2] = data[i * 1 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) {
data[i * 1 + j + 0] = m.data[0];
data[i * 1 + j + 1] = m.data[1];
data[i * 1 + j + 2] = m.data[2];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> slice_4x1(int i = 0, int j = 0) const {
Matrix<Element, 4, 1> m;
m.data[0] = data[i * 1 + j + 0];
m.data[1] = data[i * 1 + j + 1];
m.data[2] = data[i * 1 + j + 2];
m.data[3] = data[i * 1 + j + 3];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_4x1(Matrix<Element, 4, 1> const &m, int i = 0, int j = 0) {
data[i * 1 + j + 0] = m.data[0];
data[i * 1 + j + 1] = m.data[1];
data[i * 1 + j + 2] = m.data[2];
data[i * 1 + j + 3] = m.data[3];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> column(int j) const {
return slice_4x1(0, j);
}
CUTLASS_HOST_DEVICE
Matrix &set_column(Matrix<Element, 4, 1> const &v, int j = 0) {
return set_slice_4x1(v, 0, j);
}
/// Concatenates this matrix with a 4-by-1 matrix to form a 4-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> hcat(Matrix<Element, 4, 1> const & rhs) const {
return Matrix<Element, 4, 2>::hcat(*this, rhs);
}
/// Concatenates this matrix with a 4-by-2 matrix to form a 4-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> hcat(Matrix<Element, 4, 2> const & rhs) const {
return Matrix<Element, 4, 3>::hcat(*this, rhs);
}
/// Concatenates this matrix with a 4-by-3 matrix to form a 4-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> hcat(Matrix<Element, 4, 3> const & rhs) const {
return Matrix<Element, 4, 4>::hcat(*this, rhs);
}
/// Forms a 4-by-1 matrix by vertically concatenating an Element with a 3-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Element upper, Matrix<Element, 3, 1> const & lower) {
return Matrix(
upper
, lower.at(0, 0)
, lower.at(1, 0)
, lower.at(2, 0));
}
/// Forms a 4-by-1 matrix by vertically concatenating a 2-by-1 matrix with a 2-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 2, 1> const & upper, Matrix<Element, 2, 1> const & lower) {
return Matrix(
upper.at(0, 0)
, upper.at(1, 0)
, lower.at(0, 0)
, lower.at(1, 0));
}
/// Forms a 4-by-1 matrix by vertically concatenating a 3-by-1 matrix with an Element
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 3, 1> const & upper, Element lower) {
return Matrix(
upper.at(0, 0)
, upper.at(1, 0)
, upper.at(2, 0)
, lower);
}
/// Elementwise add operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
result.data[3] = data[3] + rhs.data[3];
return result;
}
/// Elementwise add operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
data[3] += rhs.data[3];
return *this;
}
/// Elementwise subtract operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
result.data[3] = data[3] - rhs.data[3];
return result;
}
/// Elementwise subtract operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
data[3] -= rhs.data[3];
return *this;
}
/// Elementwise multiply operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
result.data[3] = data[3] * rhs.data[3];
return result;
}
/// Scalar multiply operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
result.data[3] = data[3] * s;
return result;
}
/// Scalar multiply operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
data[3] *= s;
return *this;
}
/// Elementwise divide operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
result.data[3] = data[3] / rhs.data[3];
return result;
}
/// Scalar divide operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
result.data[3] = data[3] / s;
return result;
}
/// Scalar divide operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
data[3] /= s;
return *this;
}
/// Elementwise divide operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (4-by-1)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
data[3] /= rhs.data[3];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
m.data[0] = -data[0];
m.data[1] = -data[1];
m.data[2] = -data[2];
m.data[3] = -data[3];
return m;
}
/// Matrix product of size 4-by-1-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> product(
Matrix<Element, 1, 1> const &rhs,
Matrix<Element, 4, 1> accum = Matrix<Element, 4, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[1] * rhs.data[0];
accum.data[2] += data[2] * rhs.data[0];
accum.data[3] += data[3] * rhs.data[0];
return accum;
}
/// Matrix product of size 4-by-1-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> operator*(Matrix<Element, 1, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-1-by-1
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 1, 1> const &rhs) {
*this = product(rhs);
return *this;
}
/// Matrix product of size 4-by-2-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> product(
Matrix<Element, 1, 2> const &rhs,
Matrix<Element, 4, 2> accum = Matrix<Element, 4, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[1] * rhs.data[0];
accum.data[3] += data[1] * rhs.data[1];
accum.data[4] += data[2] * rhs.data[0];
accum.data[5] += data[2] * rhs.data[1];
accum.data[6] += data[3] * rhs.data[0];
accum.data[7] += data[3] * rhs.data[1];
return accum;
}
/// Matrix product of size 4-by-2-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> operator*(Matrix<Element, 1, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-3-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> product(
Matrix<Element, 1, 3> const &rhs,
Matrix<Element, 4, 3> accum = Matrix<Element, 4, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[1] * rhs.data[0];
accum.data[4] += data[1] * rhs.data[1];
accum.data[5] += data[1] * rhs.data[2];
accum.data[6] += data[2] * rhs.data[0];
accum.data[7] += data[2] * rhs.data[1];
accum.data[8] += data[2] * rhs.data[2];
accum.data[9] += data[3] * rhs.data[0];
accum.data[10] += data[3] * rhs.data[1];
accum.data[11] += data[3] * rhs.data[2];
return accum;
}
/// Matrix product of size 4-by-3-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> operator*(Matrix<Element, 1, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-4-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> product(
Matrix<Element, 1, 4> const &rhs,
Matrix<Element, 4, 4> accum = Matrix<Element, 4, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[1] * rhs.data[0];
accum.data[5] += data[1] * rhs.data[1];
accum.data[6] += data[1] * rhs.data[2];
accum.data[7] += data[1] * rhs.data[3];
accum.data[8] += data[2] * rhs.data[0];
accum.data[9] += data[2] * rhs.data[1];
accum.data[10] += data[2] * rhs.data[2];
accum.data[11] += data[2] * rhs.data[3];
accum.data[12] += data[3] * rhs.data[0];
accum.data[13] += data[3] * rhs.data[1];
accum.data[14] += data[3] * rhs.data[2];
accum.data[15] += data[3] * rhs.data[3];
return accum;
}
/// Matrix product of size 4-by-4-by-1
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> operator*(Matrix<Element, 1, 4> const &rhs) const {
return product(rhs);
}
/// Dot product of vectors with extent 4
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 4, 1> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
accum += data[2] * rhs.data[2];
accum += data[3] * rhs.data[3];
return accum;
}
/// Dot product of vectors with extent 4
CUTLASS_HOST_DEVICE
Element dot(Matrix<Element, 1, 4> const &rhs, Element accum = Element()) const {
accum += data[0] * rhs.data[0];
accum += data[1] * rhs.data[1];
accum += data[2] * rhs.data[2];
accum += data[3] * rhs.data[3];
return accum;
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
accum += data[3];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
accum += data[3] * data[3];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
return accum;
}
};
/// Template alias for 4-by-1 matrix
template <typename Element>
using Matrix4x1 = Matrix<Element, 4, 1>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix4x1<Element> make_Matrix4x1(
Element _0_0,
Element _1_0,
Element _2_0,
Element _3_0
) {
return Matrix4x1<Element>(
_0_0,
_1_0,
_2_0,
_3_0
);
}
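// Usage sketch for the 4-by-1 column vector above (illustrative comment only;
// Matrix<float, 1, 4> and Matrix<float, 4, 4> come from the companion
// specializations elsewhere in this header):
//
//   Matrix4x1<float> v = make_Matrix4x1(1.f, 2.f, 3.f, 4.f);
//   Matrix<float, 1, 4> vT = v.transpose();     // row vector
//   float d = v.dot(v);                         // 1 + 4 + 9 + 16 = 30
//   Matrix<float, 4, 4> outer = v * vT;         // 4-by-4-by-1 outer product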
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 4-by-2 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 4, 2> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 4;
/// Number of columns in matrix
static int const kColumns = 2;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 8;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 4-by-2 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
/// Constructs a 4-by-2 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1,
Element _1_0, Element _1_1,
Element _2_0, Element _2_1,
Element _3_0, Element _3_1
) {
data[0] = _0_0; data[1] = _0_1;
data[2] = _1_0; data[3] = _1_1;
data[4] = _2_0; data[5] = _2_1;
data[6] = _3_0; data[7] = _3_1;
}
/// Constructs a 4-by-2 matrix from row vectors
CUTLASS_HOST_DEVICE
Matrix(
Matrix<Element, 1, 2> const &row_0,
Matrix<Element, 1, 2> const &row_1,
Matrix<Element, 1, 2> const &row_2,
Matrix<Element, 1, 2> const &row_3
) {
data[0] = row_0.data[0];
data[1] = row_0.data[1];
data[2] = row_1.data[0];
data[3] = row_1.data[1];
data[4] = row_2.data[0];
data[5] = row_2.data[1];
data[6] = row_3.data[0];
data[7] = row_3.data[1];
}
/// Static method to construct a 4-by-2 matrix from column vectors
CUTLASS_HOST_DEVICE
static Matrix from_columns(
Matrix<Element, 2, 1> const &column_0,
Matrix<Element, 2, 1> const &column_1
) {
Matrix result;
result.data[0] = column_0.data[0];
result.data[1] = column_1.data[0];
result.data[2] = column_0.data[1];
result.data[3] = column_1.data[1];
result.data[4] = column_0.data[2];
result.data[5] = column_1.data[2];
result.data[6] = column_0.data[3];
result.data[7] = column_1.data[3];
return result;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
m.data[3] = s;
m.data[4] = s;
m.data[5] = s;
m.data[6] = s;
m.data[7] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 2, 1> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[3] = diag.data[1];
return m;
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 1, 2> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[3] = diag.data[1];
return m;
}
/// Gets an array of diagonal elements
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> diagonal() const {
Matrix<Element, 2, 1> diag;
diag.data[0] = data[0];
diag.data[1] = data[3];
return diag;
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> transpose() const {
Matrix<Element, 2, 4> mt;
mt.data[0] = data[0];
mt.data[4] = data[1];
mt.data[1] = data[2];
mt.data[5] = data[3];
mt.data[2] = data[4];
mt.data[6] = data[5];
mt.data[3] = data[6];
mt.data[7] = data[7];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 2 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 2 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 1] = m.data[1];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> row(int i) const {
return slice_1x2(i, 0);
}
CUTLASS_HOST_DEVICE
Matrix &set_row(Matrix<Element, 1, 2> const &v, int i = 0) {
return set_slice_1x2(v, i, 0);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 2] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const {
Matrix<Element, 2, 2> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 1];
m.data[2] = data[i * 2 + j + 2];
m.data[3] = data[i * 2 + j + 3];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 1] = m.data[1];
data[i * 2 + j + 2] = m.data[2];
data[i * 2 + j + 3] = m.data[3];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const {
Matrix<Element, 3, 1> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 2];
m.data[2] = data[i * 2 + j + 4];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 2] = m.data[1];
data[i * 2 + j + 4] = m.data[2];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> slice_3x2(int i = 0, int j = 0) const {
Matrix<Element, 3, 2> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 1];
m.data[2] = data[i * 2 + j + 2];
m.data[3] = data[i * 2 + j + 3];
m.data[4] = data[i * 2 + j + 4];
m.data[5] = data[i * 2 + j + 5];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x2(Matrix<Element, 3, 2> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 1] = m.data[1];
data[i * 2 + j + 2] = m.data[2];
data[i * 2 + j + 3] = m.data[3];
data[i * 2 + j + 4] = m.data[4];
data[i * 2 + j + 5] = m.data[5];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> slice_4x1(int i = 0, int j = 0) const {
Matrix<Element, 4, 1> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 2];
m.data[2] = data[i * 2 + j + 4];
m.data[3] = data[i * 2 + j + 6];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_4x1(Matrix<Element, 4, 1> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 2] = m.data[1];
data[i * 2 + j + 4] = m.data[2];
data[i * 2 + j + 6] = m.data[3];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> column(int j) const {
return slice_4x1(0, j);
}
CUTLASS_HOST_DEVICE
Matrix &set_column(Matrix<Element, 4, 1> const &v, int j = 0) {
return set_slice_4x1(v, 0, j);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> slice_4x2(int i = 0, int j = 0) const {
Matrix<Element, 4, 2> m;
m.data[0] = data[i * 2 + j + 0];
m.data[1] = data[i * 2 + j + 1];
m.data[2] = data[i * 2 + j + 2];
m.data[3] = data[i * 2 + j + 3];
m.data[4] = data[i * 2 + j + 4];
m.data[5] = data[i * 2 + j + 5];
m.data[6] = data[i * 2 + j + 6];
m.data[7] = data[i * 2 + j + 7];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_4x2(Matrix<Element, 4, 2> const &m, int i = 0, int j = 0) {
data[i * 2 + j + 0] = m.data[0];
data[i * 2 + j + 1] = m.data[1];
data[i * 2 + j + 2] = m.data[2];
data[i * 2 + j + 3] = m.data[3];
data[i * 2 + j + 4] = m.data[4];
data[i * 2 + j + 5] = m.data[5];
data[i * 2 + j + 6] = m.data[6];
data[i * 2 + j + 7] = m.data[7];
return *this;
}
/// Forms a 4-by-2 matrix by horizontally concatenating a 4-by-1 matrix with a 4-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 4, 1> const & lhs, Matrix<Element, 4, 1> const & rhs) {
return Matrix(
lhs.at(0, 0), rhs.at(0, 0)
, lhs.at(1, 0), rhs.at(1, 0)
, lhs.at(2, 0), rhs.at(2, 0)
, lhs.at(3, 0), rhs.at(3, 0));
}
/// Concatenates this matrix with a 4-by-1 matrix to form a 4-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> hcat(Matrix<Element, 4, 1> const & rhs) const {
return Matrix<Element, 4, 3>::hcat(*this, rhs);
}
/// Concatenates this matrix with a 4-by-2 matrix to form a 4-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> hcat(Matrix<Element, 4, 2> const & rhs) const {
return Matrix<Element, 4, 4>::hcat(*this, rhs);
}
/// Forms a 4-by-2 matrix by vertically concatenating a 1-by-2 matrix with a 3-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 1, 2> const & upper, Matrix<Element, 3, 2> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1)
, lower.at(0, 0), lower.at(0, 1)
, lower.at(1, 0), lower.at(1, 1)
, lower.at(2, 0), lower.at(2, 1));
}
/// Forms a 4-by-2 matrix by vertically concatenating a 2-by-2 matrix with a 2-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 2, 2> const & upper, Matrix<Element, 2, 2> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1)
, upper.at(1, 0), upper.at(1, 1)
, lower.at(0, 0), lower.at(0, 1)
, lower.at(1, 0), lower.at(1, 1));
}
/// Forms a 4-by-2 matrix by vertically concatenating a 3-by-2 matrix with a 1-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 3, 2> const & upper, Matrix<Element, 1, 2> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1)
, upper.at(1, 0), upper.at(1, 1)
, upper.at(2, 0), upper.at(2, 1)
, lower.at(0, 0), lower.at(0, 1));
}
/// Forms a 4-by-2 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Element A, Element B,
Matrix<Element, 3, 1> const & C, Matrix<Element, 3, 1> const & D) {
return Matrix(
A, B
, C.at(0, 0), D.at(0, 0)
, C.at(1, 0), D.at(1, 0)
, C.at(2, 0), D.at(2, 0)
);
}
/// Forms a 4-by-2 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 1> const & A, Matrix<Element, 2, 1> const & B,
Matrix<Element, 2, 1> const & C, Matrix<Element, 2, 1> const & D) {
return Matrix(
A.at(0, 0), B.at(0, 0)
, A.at(1, 0), B.at(1, 0)
, C.at(0, 0), D.at(0, 0)
, C.at(1, 0), D.at(1, 0)
);
}
/// Forms a 4-by-2 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 3, 1> const & A, Matrix<Element, 3, 1> const & B,
Element C, Element D) {
return Matrix(
A.at(0, 0), B.at(0, 0)
, A.at(1, 0), B.at(1, 0)
, A.at(2, 0), B.at(2, 0)
, C, D
);
}
/// Elementwise add operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
result.data[3] = data[3] + rhs.data[3];
result.data[4] = data[4] + rhs.data[4];
result.data[5] = data[5] + rhs.data[5];
result.data[6] = data[6] + rhs.data[6];
result.data[7] = data[7] + rhs.data[7];
return result;
}
/// Elementwise add operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
data[3] += rhs.data[3];
data[4] += rhs.data[4];
data[5] += rhs.data[5];
data[6] += rhs.data[6];
data[7] += rhs.data[7];
return *this;
}
/// Elementwise subtract operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
result.data[3] = data[3] - rhs.data[3];
result.data[4] = data[4] - rhs.data[4];
result.data[5] = data[5] - rhs.data[5];
result.data[6] = data[6] - rhs.data[6];
result.data[7] = data[7] - rhs.data[7];
return result;
}
/// Elementwise subtract operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
data[3] -= rhs.data[3];
data[4] -= rhs.data[4];
data[5] -= rhs.data[5];
data[6] -= rhs.data[6];
data[7] -= rhs.data[7];
return *this;
}
/// Elementwise multiply operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
result.data[3] = data[3] * rhs.data[3];
result.data[4] = data[4] * rhs.data[4];
result.data[5] = data[5] * rhs.data[5];
result.data[6] = data[6] * rhs.data[6];
result.data[7] = data[7] * rhs.data[7];
return result;
}
/// Scalar multiply operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
result.data[3] = data[3] * s;
result.data[4] = data[4] * s;
result.data[5] = data[5] * s;
result.data[6] = data[6] * s;
result.data[7] = data[7] * s;
return result;
}
/// Scalar multiply operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
data[3] *= s;
data[4] *= s;
data[5] *= s;
data[6] *= s;
data[7] *= s;
return *this;
}
/// Elementwise divide operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
result.data[3] = data[3] / rhs.data[3];
result.data[4] = data[4] / rhs.data[4];
result.data[5] = data[5] / rhs.data[5];
result.data[6] = data[6] / rhs.data[6];
result.data[7] = data[7] / rhs.data[7];
return result;
}
/// Scalar divide operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
result.data[3] = data[3] / s;
result.data[4] = data[4] / s;
result.data[5] = data[5] / s;
result.data[6] = data[6] / s;
result.data[7] = data[7] / s;
return result;
}
/// Scalar divide operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
data[3] /= s;
data[4] /= s;
data[5] /= s;
data[6] /= s;
data[7] /= s;
return *this;
}
/// Elementwise divide operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (4-by-2)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
data[3] /= rhs.data[3];
data[4] /= rhs.data[4];
data[5] /= rhs.data[5];
data[6] /= rhs.data[6];
data[7] /= rhs.data[7];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
m.data[0] = -data[0];
m.data[1] = -data[1];
m.data[2] = -data[2];
m.data[3] = -data[3];
m.data[4] = -data[4];
m.data[5] = -data[5];
m.data[6] = -data[6];
m.data[7] = -data[7];
return m;
}
/// Matrix product of size 4-by-1-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> product(
Matrix<Element, 2, 1> const &rhs,
Matrix<Element, 4, 1> accum = Matrix<Element, 4, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[2] * rhs.data[0];
accum.data[2] += data[4] * rhs.data[0];
accum.data[3] += data[6] * rhs.data[0];
// k=1
accum.data[0] += data[1] * rhs.data[1];
accum.data[1] += data[3] * rhs.data[1];
accum.data[2] += data[5] * rhs.data[1];
accum.data[3] += data[7] * rhs.data[1];
return accum;
}
/// Matrix product of size 4-by-1-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> operator*(Matrix<Element, 2, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> product(
Matrix<Element, 2, 2> const &rhs,
Matrix<Element, 4, 2> accum = Matrix<Element, 4, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[2] * rhs.data[0];
accum.data[3] += data[2] * rhs.data[1];
accum.data[4] += data[4] * rhs.data[0];
accum.data[5] += data[4] * rhs.data[1];
accum.data[6] += data[6] * rhs.data[0];
accum.data[7] += data[6] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
accum.data[2] += data[3] * rhs.data[2];
accum.data[3] += data[3] * rhs.data[3];
accum.data[4] += data[5] * rhs.data[2];
accum.data[5] += data[5] * rhs.data[3];
accum.data[6] += data[7] * rhs.data[2];
accum.data[7] += data[7] * rhs.data[3];
return accum;
}
/// Matrix product of size 4-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> operator*(Matrix<Element, 2, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-2-by-2
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 2, 2> const &rhs) {
*this = product(rhs);
return *this;
}
/// Matrix product of size 4-by-3-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> product(
Matrix<Element, 2, 3> const &rhs,
Matrix<Element, 4, 3> accum = Matrix<Element, 4, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[2] * rhs.data[0];
accum.data[4] += data[2] * rhs.data[1];
accum.data[5] += data[2] * rhs.data[2];
accum.data[6] += data[4] * rhs.data[0];
accum.data[7] += data[4] * rhs.data[1];
accum.data[8] += data[4] * rhs.data[2];
accum.data[9] += data[6] * rhs.data[0];
accum.data[10] += data[6] * rhs.data[1];
accum.data[11] += data[6] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
accum.data[3] += data[3] * rhs.data[3];
accum.data[4] += data[3] * rhs.data[4];
accum.data[5] += data[3] * rhs.data[5];
accum.data[6] += data[5] * rhs.data[3];
accum.data[7] += data[5] * rhs.data[4];
accum.data[8] += data[5] * rhs.data[5];
accum.data[9] += data[7] * rhs.data[3];
accum.data[10] += data[7] * rhs.data[4];
accum.data[11] += data[7] * rhs.data[5];
return accum;
}
/// Matrix product of size 4-by-3-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> operator*(Matrix<Element, 2, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-4-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> product(
Matrix<Element, 2, 4> const &rhs,
Matrix<Element, 4, 4> accum = Matrix<Element, 4, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[2] * rhs.data[0];
accum.data[5] += data[2] * rhs.data[1];
accum.data[6] += data[2] * rhs.data[2];
accum.data[7] += data[2] * rhs.data[3];
accum.data[8] += data[4] * rhs.data[0];
accum.data[9] += data[4] * rhs.data[1];
accum.data[10] += data[4] * rhs.data[2];
accum.data[11] += data[4] * rhs.data[3];
accum.data[12] += data[6] * rhs.data[0];
accum.data[13] += data[6] * rhs.data[1];
accum.data[14] += data[6] * rhs.data[2];
accum.data[15] += data[6] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
accum.data[4] += data[3] * rhs.data[4];
accum.data[5] += data[3] * rhs.data[5];
accum.data[6] += data[3] * rhs.data[6];
accum.data[7] += data[3] * rhs.data[7];
accum.data[8] += data[5] * rhs.data[4];
accum.data[9] += data[5] * rhs.data[5];
accum.data[10] += data[5] * rhs.data[6];
accum.data[11] += data[5] * rhs.data[7];
accum.data[12] += data[7] * rhs.data[4];
accum.data[13] += data[7] * rhs.data[5];
accum.data[14] += data[7] * rhs.data[6];
accum.data[15] += data[7] * rhs.data[7];
return accum;
}
/// Matrix product of size 4-by-4-by-2
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> operator*(Matrix<Element, 2, 4> const &rhs) const {
return product(rhs);
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
accum += data[3];
accum += data[4];
accum += data[5];
accum += data[6];
accum += data[7];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
accum += data[3] * data[3];
accum += data[4] * data[4];
accum += data[5] * data[5];
accum += data[6] * data[6];
accum += data[7] * data[7];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
accum += data[3];
return accum;
}
};
/// Template alias for 4-by-2 matrix
template <typename Element>
using Matrix4x2 = Matrix<Element, 4, 2>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix4x2<Element> make_Matrix4x2(
Element _0_0, Element _0_1,
Element _1_0, Element _1_1,
Element _2_0, Element _2_1,
Element _3_0, Element _3_1
) {
return Matrix4x2<Element>(
_0_0, _0_1,
_1_0, _1_1,
_2_0, _2_1,
_3_0, _3_1
);
}
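// Usage sketch for the 4-by-2 matrix above (illustrative comment only; the 2-by-2
// specialization is assumed to provide the same uniform() factory shown for the
// sizes defined in this header):
//
//   Matrix4x2<float> M = Matrix4x2<float>::ones();
//   Matrix<float, 2, 2> R = Matrix<float, 2, 2>::uniform(0.5f);
//   Matrix4x2<float> P = M * R;           // 4-by-2-by-2 product; every entry is 1
//   Matrix<float, 4, 4> Q = M.hcat(P);    // horizontal concatenation to 4-by-4
//   Matrix<float, 1, 2> r0 = M.row(0);    // first row of M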
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 4-by-3 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 4, 3> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 4;
/// Number of columns in matrix
static int const kColumns = 3;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 12;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 4-by-3 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
/// Constructs a 4-by-3 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1, Element _0_2,
Element _1_0, Element _1_1, Element _1_2,
Element _2_0, Element _2_1, Element _2_2,
Element _3_0, Element _3_1, Element _3_2
) {
data[0] = _0_0; data[1] = _0_1; data[2] = _0_2;
data[3] = _1_0; data[4] = _1_1; data[5] = _1_2;
data[6] = _2_0; data[7] = _2_1; data[8] = _2_2;
data[9] = _3_0; data[10] = _3_1; data[11] = _3_2;
}
/// Constructs a 4-by-3 matrix from row vectors
CUTLASS_HOST_DEVICE
Matrix(
Matrix<Element, 1, 3> const &row_0,
Matrix<Element, 1, 3> const &row_1,
Matrix<Element, 1, 3> const &row_2,
Matrix<Element, 1, 3> const &row_3
) {
data[0] = row_0.data[0];
data[1] = row_0.data[1];
data[2] = row_0.data[2];
data[3] = row_1.data[0];
data[4] = row_1.data[1];
data[5] = row_1.data[2];
data[6] = row_2.data[0];
data[7] = row_2.data[1];
data[8] = row_2.data[2];
data[9] = row_3.data[0];
data[10] = row_3.data[1];
data[11] = row_3.data[2];
}
/// Static method to construct a 4-by-3 matrix from column vectors
CUTLASS_HOST_DEVICE
static Matrix from_columns(
Matrix<Element, 3, 1> const &column_0,
Matrix<Element, 3, 1> const &column_1,
Matrix<Element, 3, 1> const &column_2
) {
Matrix result;
result.data[0] = column_0.data[0];
result.data[1] = column_1.data[0];
result.data[2] = column_2.data[0];
result.data[3] = column_0.data[1];
result.data[4] = column_1.data[1];
result.data[5] = column_2.data[1];
result.data[6] = column_0.data[2];
result.data[7] = column_1.data[2];
result.data[8] = column_2.data[2];
result.data[9] = column_0.data[3];
result.data[10] = column_1.data[3];
result.data[11] = column_2.data[3];
return result;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
m.data[3] = s;
m.data[4] = s;
m.data[5] = s;
m.data[6] = s;
m.data[7] = s;
m.data[8] = s;
m.data[9] = s;
m.data[10] = s;
m.data[11] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 3, 1> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[4] = diag.data[1];
m.data[8] = diag.data[2];
return m;
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 1, 3> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[4] = diag.data[1];
m.data[8] = diag.data[2];
return m;
}
/// Gets an array of diagonal elements
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> diagonal() const {
Matrix<Element, 3, 1> diag;
diag.data[0] = data[0];
diag.data[1] = data[4];
diag.data[2] = data[8];
return diag;
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> transpose() const {
Matrix<Element, 3, 4> mt;
mt.data[0] = data[0];
mt.data[4] = data[1];
mt.data[8] = data[2];
mt.data[1] = data[3];
mt.data[5] = data[4];
mt.data[9] = data[5];
mt.data[2] = data[6];
mt.data[6] = data[7];
mt.data[10] = data[8];
mt.data[3] = data[9];
mt.data[7] = data[10];
mt.data[11] = data[11];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 3 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 3 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const {
Matrix<Element, 1, 3> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 2] = m.data[2];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> row(int i) const {
return slice_1x3(i, 0);
}
CUTLASS_HOST_DEVICE
Matrix &set_row(Matrix<Element, 1, 3> const &v, int i = 0) {
return set_slice_1x3(v, i, 0);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 3];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 3] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const {
Matrix<Element, 2, 2> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 3];
m.data[3] = data[i * 3 + j + 4];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 3] = m.data[2];
data[i * 3 + j + 4] = m.data[3];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> slice_2x3(int i = 0, int j = 0) const {
Matrix<Element, 2, 3> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 2];
m.data[3] = data[i * 3 + j + 3];
m.data[4] = data[i * 3 + j + 4];
m.data[5] = data[i * 3 + j + 5];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x3(Matrix<Element, 2, 3> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 2] = m.data[2];
data[i * 3 + j + 3] = m.data[3];
data[i * 3 + j + 4] = m.data[4];
data[i * 3 + j + 5] = m.data[5];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const {
Matrix<Element, 3, 1> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 3];
m.data[2] = data[i * 3 + j + 6];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 3] = m.data[1];
data[i * 3 + j + 6] = m.data[2];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> slice_3x2(int i = 0, int j = 0) const {
Matrix<Element, 3, 2> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 3];
m.data[3] = data[i * 3 + j + 4];
m.data[4] = data[i * 3 + j + 6];
m.data[5] = data[i * 3 + j + 7];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x2(Matrix<Element, 3, 2> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 3] = m.data[2];
data[i * 3 + j + 4] = m.data[3];
data[i * 3 + j + 6] = m.data[4];
data[i * 3 + j + 7] = m.data[5];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> slice_3x3(int i = 0, int j = 0) const {
Matrix<Element, 3, 3> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 2];
m.data[3] = data[i * 3 + j + 3];
m.data[4] = data[i * 3 + j + 4];
m.data[5] = data[i * 3 + j + 5];
m.data[6] = data[i * 3 + j + 6];
m.data[7] = data[i * 3 + j + 7];
m.data[8] = data[i * 3 + j + 8];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x3(Matrix<Element, 3, 3> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 2] = m.data[2];
data[i * 3 + j + 3] = m.data[3];
data[i * 3 + j + 4] = m.data[4];
data[i * 3 + j + 5] = m.data[5];
data[i * 3 + j + 6] = m.data[6];
data[i * 3 + j + 7] = m.data[7];
data[i * 3 + j + 8] = m.data[8];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> slice_4x1(int i = 0, int j = 0) const {
Matrix<Element, 4, 1> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 3];
m.data[2] = data[i * 3 + j + 6];
m.data[3] = data[i * 3 + j + 9];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_4x1(Matrix<Element, 4, 1> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 3] = m.data[1];
data[i * 3 + j + 6] = m.data[2];
data[i * 3 + j + 9] = m.data[3];
return *this;
}
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> column(int j) const {
return slice_4x1(0, j);
}
CUTLASS_HOST_DEVICE
Matrix &set_column(Matrix<Element, 4, 1> const &v, int j = 0) {
return set_slice_4x1(v, 0, j);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> slice_4x2(int i = 0, int j = 0) const {
Matrix<Element, 4, 2> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 3];
m.data[3] = data[i * 3 + j + 4];
m.data[4] = data[i * 3 + j + 6];
m.data[5] = data[i * 3 + j + 7];
m.data[6] = data[i * 3 + j + 9];
m.data[7] = data[i * 3 + j + 10];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_4x2(Matrix<Element, 4, 2> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 3] = m.data[2];
data[i * 3 + j + 4] = m.data[3];
data[i * 3 + j + 6] = m.data[4];
data[i * 3 + j + 7] = m.data[5];
data[i * 3 + j + 9] = m.data[6];
data[i * 3 + j + 10] = m.data[7];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> slice_4x3(int i = 0, int j = 0) const {
Matrix<Element, 4, 3> m;
m.data[0] = data[i * 3 + j + 0];
m.data[1] = data[i * 3 + j + 1];
m.data[2] = data[i * 3 + j + 2];
m.data[3] = data[i * 3 + j + 3];
m.data[4] = data[i * 3 + j + 4];
m.data[5] = data[i * 3 + j + 5];
m.data[6] = data[i * 3 + j + 6];
m.data[7] = data[i * 3 + j + 7];
m.data[8] = data[i * 3 + j + 8];
m.data[9] = data[i * 3 + j + 9];
m.data[10] = data[i * 3 + j + 10];
m.data[11] = data[i * 3 + j + 11];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_4x3(Matrix<Element, 4, 3> const &m, int i = 0, int j = 0) {
data[i * 3 + j + 0] = m.data[0];
data[i * 3 + j + 1] = m.data[1];
data[i * 3 + j + 2] = m.data[2];
data[i * 3 + j + 3] = m.data[3];
data[i * 3 + j + 4] = m.data[4];
data[i * 3 + j + 5] = m.data[5];
data[i * 3 + j + 6] = m.data[6];
data[i * 3 + j + 7] = m.data[7];
data[i * 3 + j + 8] = m.data[8];
data[i * 3 + j + 9] = m.data[9];
data[i * 3 + j + 10] = m.data[10];
data[i * 3 + j + 11] = m.data[11];
return *this;
}
/// Forms a 4-by-3 matrix by horizontally concatenating a 4-by-1 matrix with a 4-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 4, 1> const & lhs, Matrix<Element, 4, 2> const & rhs) {
return Matrix(
lhs.at(0, 0), rhs.at(0, 0), rhs.at(0, 1)
, lhs.at(1, 0), rhs.at(1, 0), rhs.at(1, 1)
, lhs.at(2, 0), rhs.at(2, 0), rhs.at(2, 1)
, lhs.at(3, 0), rhs.at(3, 0), rhs.at(3, 1));
}
/// Forms a 4-by-3 matrix by horizontally concatenating a 4-by-2 matrix with a 4-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 4, 2> const & lhs, Matrix<Element, 4, 1> const & rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), rhs.at(0, 0)
, lhs.at(1, 0), lhs.at(1, 1), rhs.at(1, 0)
, lhs.at(2, 0), lhs.at(2, 1), rhs.at(2, 0)
, lhs.at(3, 0), lhs.at(3, 1), rhs.at(3, 0));
}
/// Concatenates this matrix with a 4-by-1 matrix to form a 4-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> hcat(Matrix<Element, 4, 1> const & rhs) const {
return Matrix<Element, 4, 4>::hcat(*this, rhs);
}
/// Forms a 4-by-3 matrix by vertically concatenating a 1-by-3 matrix with a 3-by-3 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 1, 3> const & upper, Matrix<Element, 3, 3> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2)
, lower.at(1, 0), lower.at(1, 1), lower.at(1, 2)
, lower.at(2, 0), lower.at(2, 1), lower.at(2, 2));
}
/// Forms a 4-by-3 matrix by vertically concatenating a 2-by-3 matrix with a 2-by-3 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 2, 3> const & upper, Matrix<Element, 2, 3> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2)
, upper.at(1, 0), upper.at(1, 1), upper.at(1, 2)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2)
, lower.at(1, 0), lower.at(1, 1), lower.at(1, 2));
}
/// Forms a 4-by-3 matrix by vertically concatenating a 3-by-3 matrix with a 1-by-3 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 3, 3> const & upper, Matrix<Element, 1, 3> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2)
, upper.at(1, 0), upper.at(1, 1), upper.at(1, 2)
, upper.at(2, 0), upper.at(2, 1), upper.at(2, 2)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2));
}
/// Forms a 4-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Element A, Matrix<Element, 1, 2> const & B,
Matrix<Element, 3, 1> const & C, Matrix<Element, 3, 2> const & D) {
return Matrix(
A, B.at(0, 0), B.at(0, 1)
, C.at(0, 0), D.at(0, 0), D.at(0, 1)
, C.at(1, 0), D.at(1, 0), D.at(1, 1)
, C.at(2, 0), D.at(2, 0), D.at(2, 1)
);
}
/// Forms a 4-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 1, 2> const & A, Element B,
Matrix<Element, 3, 2> const & C, Matrix<Element, 3, 1> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B
, C.at(0, 0), C.at(0, 1), D.at(0, 0)
, C.at(1, 0), C.at(1, 1), D.at(1, 0)
, C.at(2, 0), C.at(2, 1), D.at(2, 0)
);
}
/// Forms a 4-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 1> const & A, Matrix<Element, 2, 2> const & B,
Matrix<Element, 2, 1> const & C, Matrix<Element, 2, 2> const & D) {
return Matrix(
A.at(0, 0), B.at(0, 0), B.at(0, 1)
, A.at(1, 0), B.at(1, 0), B.at(1, 1)
, C.at(0, 0), D.at(0, 0), D.at(0, 1)
, C.at(1, 0), D.at(1, 0), D.at(1, 1)
);
}
/// Forms a 4-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 2> const & A, Matrix<Element, 2, 1> const & B,
Matrix<Element, 2, 2> const & C, Matrix<Element, 2, 1> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B.at(0, 0)
, A.at(1, 0), A.at(1, 1), B.at(1, 0)
, C.at(0, 0), C.at(0, 1), D.at(0, 0)
, C.at(1, 0), C.at(1, 1), D.at(1, 0)
);
}
/// Forms a 4-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 3, 1> const & A, Matrix<Element, 3, 2> const & B,
Element C, Matrix<Element, 1, 2> const & D) {
return Matrix(
A.at(0, 0), B.at(0, 0), B.at(0, 1)
, A.at(1, 0), B.at(1, 0), B.at(1, 1)
, A.at(2, 0), B.at(2, 0), B.at(2, 1)
, C, D.at(0, 0), D.at(0, 1)
);
}
/// Forms a 4-by-3 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 3, 2> const & A, Matrix<Element, 3, 1> const & B,
Matrix<Element, 1, 2> const & C, Element D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B.at(0, 0)
, A.at(1, 0), A.at(1, 1), B.at(1, 0)
, A.at(2, 0), A.at(2, 1), B.at(2, 0)
, C.at(0, 0), C.at(0, 1), D
);
}
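// Composition sketch for the block() overloads above (illustrative comment only;
// the 3-by-2, 3-by-1, and 1-by-2 specializations are assumed to follow the same
// pattern as the classes defined in this header):
//
//   Matrix<float, 3, 2> A = Matrix<float, 3, 2>::ones();
//   Matrix<float, 3, 1> B = Matrix<float, 3, 1>::ones();
//   Matrix<float, 1, 2> C = Matrix<float, 1, 2>::ones();
//   float               D = 1.f;
//   // Assembles [[A, B], [C, D]] into a single 4-by-3 matrix of ones.
//   Matrix<float, 4, 3> M = Matrix<float, 4, 3>::block(A, B, C, D);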
/// Elementwise add operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
result.data[3] = data[3] + rhs.data[3];
result.data[4] = data[4] + rhs.data[4];
result.data[5] = data[5] + rhs.data[5];
result.data[6] = data[6] + rhs.data[6];
result.data[7] = data[7] + rhs.data[7];
result.data[8] = data[8] + rhs.data[8];
result.data[9] = data[9] + rhs.data[9];
result.data[10] = data[10] + rhs.data[10];
result.data[11] = data[11] + rhs.data[11];
return result;
}
/// Elementwise add operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
data[3] += rhs.data[3];
data[4] += rhs.data[4];
data[5] += rhs.data[5];
data[6] += rhs.data[6];
data[7] += rhs.data[7];
data[8] += rhs.data[8];
data[9] += rhs.data[9];
data[10] += rhs.data[10];
data[11] += rhs.data[11];
return *this;
}
/// Elementwise subtract operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
result.data[3] = data[3] - rhs.data[3];
result.data[4] = data[4] - rhs.data[4];
result.data[5] = data[5] - rhs.data[5];
result.data[6] = data[6] - rhs.data[6];
result.data[7] = data[7] - rhs.data[7];
result.data[8] = data[8] - rhs.data[8];
result.data[9] = data[9] - rhs.data[9];
result.data[10] = data[10] - rhs.data[10];
result.data[11] = data[11] - rhs.data[11];
return result;
}
/// Elementwise subtract operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
data[3] -= rhs.data[3];
data[4] -= rhs.data[4];
data[5] -= rhs.data[5];
data[6] -= rhs.data[6];
data[7] -= rhs.data[7];
data[8] -= rhs.data[8];
data[9] -= rhs.data[9];
data[10] -= rhs.data[10];
data[11] -= rhs.data[11];
return *this;
}
/// Elementwise multiply operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
result.data[3] = data[3] * rhs.data[3];
result.data[4] = data[4] * rhs.data[4];
result.data[5] = data[5] * rhs.data[5];
result.data[6] = data[6] * rhs.data[6];
result.data[7] = data[7] * rhs.data[7];
result.data[8] = data[8] * rhs.data[8];
result.data[9] = data[9] * rhs.data[9];
result.data[10] = data[10] * rhs.data[10];
result.data[11] = data[11] * rhs.data[11];
return result;
}
/// Scalar multiply operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
result.data[3] = data[3] * s;
result.data[4] = data[4] * s;
result.data[5] = data[5] * s;
result.data[6] = data[6] * s;
result.data[7] = data[7] * s;
result.data[8] = data[8] * s;
result.data[9] = data[9] * s;
result.data[10] = data[10] * s;
result.data[11] = data[11] * s;
return result;
}
/// Scalar multiply operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
data[3] *= s;
data[4] *= s;
data[5] *= s;
data[6] *= s;
data[7] *= s;
data[8] *= s;
data[9] *= s;
data[10] *= s;
data[11] *= s;
return *this;
}
/// Elementwise divide operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
result.data[3] = data[3] / rhs.data[3];
result.data[4] = data[4] / rhs.data[4];
result.data[5] = data[5] / rhs.data[5];
result.data[6] = data[6] / rhs.data[6];
result.data[7] = data[7] / rhs.data[7];
result.data[8] = data[8] / rhs.data[8];
result.data[9] = data[9] / rhs.data[9];
result.data[10] = data[10] / rhs.data[10];
result.data[11] = data[11] / rhs.data[11];
return result;
}
/// Scalar divide operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
result.data[3] = data[3] / s;
result.data[4] = data[4] / s;
result.data[5] = data[5] / s;
result.data[6] = data[6] / s;
result.data[7] = data[7] / s;
result.data[8] = data[8] / s;
result.data[9] = data[9] / s;
result.data[10] = data[10] / s;
result.data[11] = data[11] / s;
return result;
}
/// Scalar divide operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
data[3] /= s;
data[4] /= s;
data[5] /= s;
data[6] /= s;
data[7] /= s;
data[8] /= s;
data[9] /= s;
data[10] /= s;
data[11] /= s;
return *this;
}
/// Elementwise divide operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (4-by-3)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
data[3] /= rhs.data[3];
data[4] /= rhs.data[4];
data[5] /= rhs.data[5];
data[6] /= rhs.data[6];
data[7] /= rhs.data[7];
data[8] /= rhs.data[8];
data[9] /= rhs.data[9];
data[10] /= rhs.data[10];
data[11] /= rhs.data[11];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
m.data[0] = -data[0];
m.data[1] = -data[1];
m.data[2] = -data[2];
m.data[3] = -data[3];
m.data[4] = -data[4];
m.data[5] = -data[5];
m.data[6] = -data[6];
m.data[7] = -data[7];
m.data[8] = -data[8];
m.data[9] = -data[9];
m.data[10] = -data[10];
m.data[11] = -data[11];
return m;
}
/// Matrix product of size 4-by-1-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> product(
Matrix<Element, 3, 1> const &rhs,
Matrix<Element, 4, 1> accum = Matrix<Element, 4, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[3] * rhs.data[0];
accum.data[2] += data[6] * rhs.data[0];
accum.data[3] += data[9] * rhs.data[0];
// k=1
accum.data[0] += data[1] * rhs.data[1];
accum.data[1] += data[4] * rhs.data[1];
accum.data[2] += data[7] * rhs.data[1];
accum.data[3] += data[10] * rhs.data[1];
// k=2
accum.data[0] += data[2] * rhs.data[2];
accum.data[1] += data[5] * rhs.data[2];
accum.data[2] += data[8] * rhs.data[2];
accum.data[3] += data[11] * rhs.data[2];
return accum;
}
/// Matrix product of size 4-by-1-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> operator*(Matrix<Element, 3, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-2-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> product(
Matrix<Element, 3, 2> const &rhs,
Matrix<Element, 4, 2> accum = Matrix<Element, 4, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[3] * rhs.data[0];
accum.data[3] += data[3] * rhs.data[1];
accum.data[4] += data[6] * rhs.data[0];
accum.data[5] += data[6] * rhs.data[1];
accum.data[6] += data[9] * rhs.data[0];
accum.data[7] += data[9] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
accum.data[2] += data[4] * rhs.data[2];
accum.data[3] += data[4] * rhs.data[3];
accum.data[4] += data[7] * rhs.data[2];
accum.data[5] += data[7] * rhs.data[3];
accum.data[6] += data[10] * rhs.data[2];
accum.data[7] += data[10] * rhs.data[3];
// k=2
accum.data[0] += data[2] * rhs.data[4];
accum.data[1] += data[2] * rhs.data[5];
accum.data[2] += data[5] * rhs.data[4];
accum.data[3] += data[5] * rhs.data[5];
accum.data[4] += data[8] * rhs.data[4];
accum.data[5] += data[8] * rhs.data[5];
accum.data[6] += data[11] * rhs.data[4];
accum.data[7] += data[11] * rhs.data[5];
return accum;
}
/// Matrix product of size 4-by-2-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> operator*(Matrix<Element, 3, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> product(
Matrix<Element, 3, 3> const &rhs,
Matrix<Element, 4, 3> accum = Matrix<Element, 4, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[3] * rhs.data[0];
accum.data[4] += data[3] * rhs.data[1];
accum.data[5] += data[3] * rhs.data[2];
accum.data[6] += data[6] * rhs.data[0];
accum.data[7] += data[6] * rhs.data[1];
accum.data[8] += data[6] * rhs.data[2];
accum.data[9] += data[9] * rhs.data[0];
accum.data[10] += data[9] * rhs.data[1];
accum.data[11] += data[9] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
accum.data[3] += data[4] * rhs.data[3];
accum.data[4] += data[4] * rhs.data[4];
accum.data[5] += data[4] * rhs.data[5];
accum.data[6] += data[7] * rhs.data[3];
accum.data[7] += data[7] * rhs.data[4];
accum.data[8] += data[7] * rhs.data[5];
accum.data[9] += data[10] * rhs.data[3];
accum.data[10] += data[10] * rhs.data[4];
accum.data[11] += data[10] * rhs.data[5];
// k=2
accum.data[0] += data[2] * rhs.data[6];
accum.data[1] += data[2] * rhs.data[7];
accum.data[2] += data[2] * rhs.data[8];
accum.data[3] += data[5] * rhs.data[6];
accum.data[4] += data[5] * rhs.data[7];
accum.data[5] += data[5] * rhs.data[8];
accum.data[6] += data[8] * rhs.data[6];
accum.data[7] += data[8] * rhs.data[7];
accum.data[8] += data[8] * rhs.data[8];
accum.data[9] += data[11] * rhs.data[6];
accum.data[10] += data[11] * rhs.data[7];
accum.data[11] += data[11] * rhs.data[8];
return accum;
}
/// Matrix product of size 4-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> operator*(Matrix<Element, 3, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-3-by-3
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 3, 3> const &rhs) {
*this = product(rhs);
return *this;
}
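//
// Note: an in-place operator*= is only provided for a square 3-by-3 right-hand
// side, since that is the only right operand that preserves the 4-by-3 shape
// of *this.
//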
/// Matrix product of size 4-by-4-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> product(
Matrix<Element, 3, 4> const &rhs,
Matrix<Element, 4, 4> accum = Matrix<Element, 4, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[3] * rhs.data[0];
accum.data[5] += data[3] * rhs.data[1];
accum.data[6] += data[3] * rhs.data[2];
accum.data[7] += data[3] * rhs.data[3];
accum.data[8] += data[6] * rhs.data[0];
accum.data[9] += data[6] * rhs.data[1];
accum.data[10] += data[6] * rhs.data[2];
accum.data[11] += data[6] * rhs.data[3];
accum.data[12] += data[9] * rhs.data[0];
accum.data[13] += data[9] * rhs.data[1];
accum.data[14] += data[9] * rhs.data[2];
accum.data[15] += data[9] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
accum.data[4] += data[4] * rhs.data[4];
accum.data[5] += data[4] * rhs.data[5];
accum.data[6] += data[4] * rhs.data[6];
accum.data[7] += data[4] * rhs.data[7];
accum.data[8] += data[7] * rhs.data[4];
accum.data[9] += data[7] * rhs.data[5];
accum.data[10] += data[7] * rhs.data[6];
accum.data[11] += data[7] * rhs.data[7];
accum.data[12] += data[10] * rhs.data[4];
accum.data[13] += data[10] * rhs.data[5];
accum.data[14] += data[10] * rhs.data[6];
accum.data[15] += data[10] * rhs.data[7];
// k=2
accum.data[0] += data[2] * rhs.data[8];
accum.data[1] += data[2] * rhs.data[9];
accum.data[2] += data[2] * rhs.data[10];
accum.data[3] += data[2] * rhs.data[11];
accum.data[4] += data[5] * rhs.data[8];
accum.data[5] += data[5] * rhs.data[9];
accum.data[6] += data[5] * rhs.data[10];
accum.data[7] += data[5] * rhs.data[11];
accum.data[8] += data[8] * rhs.data[8];
accum.data[9] += data[8] * rhs.data[9];
accum.data[10] += data[8] * rhs.data[10];
accum.data[11] += data[8] * rhs.data[11];
accum.data[12] += data[11] * rhs.data[8];
accum.data[13] += data[11] * rhs.data[9];
accum.data[14] += data[11] * rhs.data[10];
accum.data[15] += data[11] * rhs.data[11];
return accum;
}
/// Matrix product of size 4-by-4-by-3
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> operator*(Matrix<Element, 3, 4> const &rhs) const {
return product(rhs);
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
accum += data[3];
accum += data[4];
accum += data[5];
accum += data[6];
accum += data[7];
accum += data[8];
accum += data[9];
accum += data[10];
accum += data[11];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
accum += data[3] * data[3];
accum += data[4] * data[4];
accum += data[5] * data[5];
accum += data[6] * data[6];
accum += data[7] * data[7];
accum += data[8] * data[8];
accum += data[9] * data[9];
accum += data[10] * data[10];
accum += data[11] * data[11];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
accum += data[4];
accum += data[8];
return accum;
}
};
/// Template alias for 4-by-3 matrix
template <typename Element>
using Matrix4x3 = Matrix<Element, 4, 3>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix4x3<Element> make_Matrix4x3(
Element _0_0, Element _0_1, Element _0_2,
Element _1_0, Element _1_1, Element _1_2,
Element _2_0, Element _2_1, Element _2_2,
Element _3_0, Element _3_1, Element _3_2
) {
return Matrix4x3<Element>(
_0_0, _0_1, _0_2,
_1_0, _1_1, _1_2,
_2_0, _2_1, _2_2,
_3_0, _3_1, _3_2
);
}
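//
// Example sketch (assuming Element = float and that this header is included
// via cutlass/matrix.h): a 4-by-3 matrix maps 3-vectors to 4-vectors through
// the 4-by-1-by-3 product defined above.
//
//   cutlass::Matrix4x3<float> A = cutlass::make_Matrix4x3<float>(
//     1.f, 0.f, 0.f,
//     0.f, 1.f, 0.f,
//     0.f, 0.f, 1.f,
//     1.f, 1.f, 1.f);
//
//   cutlass::Matrix<float, 3, 1> x;               // zero-initialized column vector
//   x.data[0] = 1.f; x.data[1] = 2.f; x.data[2] = 3.f;
//
//   cutlass::Matrix<float, 4, 1> y = A * x;       // y = (1, 2, 3, 6)
//   y = A.product(x, y);                          // accumulate form: y += A * x
//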
/////////////////////////////////////////////////////////////////////////////////////////////////
/// 4-by-4 matrix template class definition
template <typename Element_>
struct Matrix<Element_, 4, 4> {
//
// Type definitions
//
/// Element data type
using Element = Element_;
/// Number of rows in matrix
static int const kRows = 4;
/// Number of columns in matrix
static int const kColumns = 4;
/// Layout of matrix in underlying array
using Layout = layout::RowMajor;
/// Number of elements in matrix
static int const kCount = 16;
//
// Data members
//
/// Elements of the matrix in row-major layout
Array<Element, kCount> data;
//
// Methods
//
/// Constructs a zero matrix
CUTLASS_HOST_DEVICE
Matrix() {
data.clear();
}
/// Copy constructor for a 4-by-4 matrix
CUTLASS_HOST_DEVICE
Matrix(Matrix const &rhs) {
data = rhs.data;
}
/// Constructs a 4-by-4 matrix from scalar elements
CUTLASS_HOST_DEVICE
Matrix(
Element _0_0, Element _0_1, Element _0_2, Element _0_3,
Element _1_0, Element _1_1, Element _1_2, Element _1_3,
Element _2_0, Element _2_1, Element _2_2, Element _2_3,
Element _3_0, Element _3_1, Element _3_2, Element _3_3
) {
data[0] = _0_0; data[1] = _0_1; data[2] = _0_2; data[3] = _0_3;
data[4] = _1_0; data[5] = _1_1; data[6] = _1_2; data[7] = _1_3;
data[8] = _2_0; data[9] = _2_1; data[10] = _2_2; data[11] = _2_3;
data[12] = _3_0; data[13] = _3_1; data[14] = _3_2; data[15] = _3_3;
}
/// Constructs a 4-by-4 matrix from row vectors
CUTLASS_HOST_DEVICE
Matrix(
Matrix<Element, 1, 4> const &row_0,
Matrix<Element, 1, 4> const &row_1,
Matrix<Element, 1, 4> const &row_2,
Matrix<Element, 1, 4> const &row_3
) {
data[0] = row_0.data[0];
data[1] = row_0.data[1];
data[2] = row_0.data[2];
data[3] = row_0.data[3];
data[4] = row_1.data[0];
data[5] = row_1.data[1];
data[6] = row_1.data[2];
data[7] = row_1.data[3];
data[8] = row_2.data[0];
data[9] = row_2.data[1];
data[10] = row_2.data[2];
data[11] = row_2.data[3];
data[12] = row_3.data[0];
data[13] = row_3.data[1];
data[14] = row_3.data[2];
data[15] = row_3.data[3];
}
/// Static method to construct a 4-by-4 matrix from column vectors
CUTLASS_HOST_DEVICE
static Matrix from_columns(
Matrix<Element, 4, 1> const &column_0,
Matrix<Element, 4, 1> const &column_1,
Matrix<Element, 4, 1> const &column_2,
Matrix<Element, 4, 1> const &column_3
) {
Matrix result;
result.data[0] = column_0.data[0];
result.data[1] = column_1.data[0];
result.data[2] = column_2.data[0];
result.data[3] = column_3.data[0];
result.data[4] = column_0.data[1];
result.data[5] = column_1.data[1];
result.data[6] = column_2.data[1];
result.data[7] = column_3.data[1];
result.data[8] = column_0.data[2];
result.data[9] = column_1.data[2];
result.data[10] = column_2.data[2];
result.data[11] = column_3.data[2];
result.data[12] = column_0.data[3];
result.data[13] = column_1.data[3];
result.data[14] = column_2.data[3];
result.data[15] = column_3.data[3];
return result;
}
/// Constructs an identity matrix
CUTLASS_HOST_DEVICE
static Matrix identity() {
Matrix m;
m.data[0] = Element(1);
m.data[5] = Element(1);
m.data[10] = Element(1);
m.data[15] = Element(1);
return m;
}
/// Constructs a matrix from a uniform element
CUTLASS_HOST_DEVICE
static Matrix uniform(Element s) {
Matrix m;
m.data[0] = s;
m.data[1] = s;
m.data[2] = s;
m.data[3] = s;
m.data[4] = s;
m.data[5] = s;
m.data[6] = s;
m.data[7] = s;
m.data[8] = s;
m.data[9] = s;
m.data[10] = s;
m.data[11] = s;
m.data[12] = s;
m.data[13] = s;
m.data[14] = s;
m.data[15] = s;
return m;
}
/// Constructs a matrix from a uniform element 1
CUTLASS_HOST_DEVICE
static Matrix ones() {
return uniform(Element(1));
}
/// Constructs a matrix from a uniform element 0
CUTLASS_HOST_DEVICE
static Matrix zero() {
return Matrix();
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 4, 1> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[5] = diag.data[1];
m.data[10] = diag.data[2];
m.data[15] = diag.data[3];
return m;
}
/// Constructs a matrix from elements along its diagonal
CUTLASS_HOST_DEVICE
static Matrix from_diagonal(Matrix<Element, 1, 4> const &diag) {
Matrix m;
m.data[0] = diag.data[0];
m.data[5] = diag.data[1];
m.data[10] = diag.data[2];
m.data[15] = diag.data[3];
return m;
}
/// Gets an array of diagonal elements
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> diagonal() const {
Matrix<Element, 4, 1> diag;
diag.data[0] = data[0];
diag.data[1] = data[5];
diag.data[2] = data[10];
diag.data[3] = data[15];
return diag;
}
/// Returns a transposed matrix
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> transpose() const {
Matrix<Element, 4, 4> mt;
mt.data[0] = data[0];
mt.data[4] = data[1];
mt.data[8] = data[2];
mt.data[12] = data[3];
mt.data[1] = data[4];
mt.data[5] = data[5];
mt.data[9] = data[6];
mt.data[13] = data[7];
mt.data[2] = data[8];
mt.data[6] = data[9];
mt.data[10] = data[10];
mt.data[14] = data[11];
mt.data[3] = data[12];
mt.data[7] = data[13];
mt.data[11] = data[14];
mt.data[15] = data[15];
return mt;
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(int i, int j) const {
return data[i * 4 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(int i, int j) {
return data[i * 4 + j];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element at(Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & at(Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element &at(int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element at(int offset) const {
return data[offset];
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element operator[](Coord<2> const &coord) const {
return at(coord[0], coord[1]);
}
/// Accesses an element by coordinate
CUTLASS_HOST_DEVICE
Element & operator[](Coord<2> const &coord) {
return at(coord[0], coord[1]);
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element & operator[](int offset) {
return data[offset];
}
/// Accesses an element by offset
CUTLASS_HOST_DEVICE
Element operator[](int offset) const {
return data[offset];
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 2> slice_1x2(int i = 0, int j = 0) const {
Matrix<Element, 1, 2> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x2(Matrix<Element, 1, 2> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 3> slice_1x3(int i = 0, int j = 0) const {
Matrix<Element, 1, 3> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x3(Matrix<Element, 1, 3> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> slice_1x4(int i = 0, int j = 0) const {
Matrix<Element, 1, 4> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 3];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_1x4(Matrix<Element, 1, 4> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 3] = m.data[3];
return *this;
}
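/// Gets the i-th row as a 1-by-4 matrix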
CUTLASS_HOST_DEVICE
Matrix<Element, 1, 4> row(int i) const {
return slice_1x4(i, 0);
}
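/// Overwrites the i-th row with a 1-by-4 matrix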
CUTLASS_HOST_DEVICE
Matrix &set_row(Matrix<Element, 1, 4> const &v, int i = 0) {
return set_slice_1x4(v, i, 0);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 1> slice_2x1(int i = 0, int j = 0) const {
Matrix<Element, 2, 1> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 4];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x1(Matrix<Element, 2, 1> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 4] = m.data[1];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 2> slice_2x2(int i = 0, int j = 0) const {
Matrix<Element, 2, 2> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 4];
m.data[3] = data[i * 4 + j + 5];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x2(Matrix<Element, 2, 2> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 4] = m.data[2];
data[i * 4 + j + 5] = m.data[3];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 3> slice_2x3(int i = 0, int j = 0) const {
Matrix<Element, 2, 3> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 4];
m.data[4] = data[i * 4 + j + 5];
m.data[5] = data[i * 4 + j + 6];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x3(Matrix<Element, 2, 3> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 4] = m.data[3];
data[i * 4 + j + 5] = m.data[4];
data[i * 4 + j + 6] = m.data[5];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 2, 4> slice_2x4(int i = 0, int j = 0) const {
Matrix<Element, 2, 4> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 3];
m.data[4] = data[i * 4 + j + 4];
m.data[5] = data[i * 4 + j + 5];
m.data[6] = data[i * 4 + j + 6];
m.data[7] = data[i * 4 + j + 7];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_2x4(Matrix<Element, 2, 4> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 3] = m.data[3];
data[i * 4 + j + 4] = m.data[4];
data[i * 4 + j + 5] = m.data[5];
data[i * 4 + j + 6] = m.data[6];
data[i * 4 + j + 7] = m.data[7];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 1> slice_3x1(int i = 0, int j = 0) const {
Matrix<Element, 3, 1> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 4];
m.data[2] = data[i * 4 + j + 8];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x1(Matrix<Element, 3, 1> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 4] = m.data[1];
data[i * 4 + j + 8] = m.data[2];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 2> slice_3x2(int i = 0, int j = 0) const {
Matrix<Element, 3, 2> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 4];
m.data[3] = data[i * 4 + j + 5];
m.data[4] = data[i * 4 + j + 8];
m.data[5] = data[i * 4 + j + 9];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x2(Matrix<Element, 3, 2> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 4] = m.data[2];
data[i * 4 + j + 5] = m.data[3];
data[i * 4 + j + 8] = m.data[4];
data[i * 4 + j + 9] = m.data[5];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 3> slice_3x3(int i = 0, int j = 0) const {
Matrix<Element, 3, 3> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 4];
m.data[4] = data[i * 4 + j + 5];
m.data[5] = data[i * 4 + j + 6];
m.data[6] = data[i * 4 + j + 8];
m.data[7] = data[i * 4 + j + 9];
m.data[8] = data[i * 4 + j + 10];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x3(Matrix<Element, 3, 3> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 4] = m.data[3];
data[i * 4 + j + 5] = m.data[4];
data[i * 4 + j + 6] = m.data[5];
data[i * 4 + j + 8] = m.data[6];
data[i * 4 + j + 9] = m.data[7];
data[i * 4 + j + 10] = m.data[8];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 3, 4> slice_3x4(int i = 0, int j = 0) const {
Matrix<Element, 3, 4> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 3];
m.data[4] = data[i * 4 + j + 4];
m.data[5] = data[i * 4 + j + 5];
m.data[6] = data[i * 4 + j + 6];
m.data[7] = data[i * 4 + j + 7];
m.data[8] = data[i * 4 + j + 8];
m.data[9] = data[i * 4 + j + 9];
m.data[10] = data[i * 4 + j + 10];
m.data[11] = data[i * 4 + j + 11];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_3x4(Matrix<Element, 3, 4> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 3] = m.data[3];
data[i * 4 + j + 4] = m.data[4];
data[i * 4 + j + 5] = m.data[5];
data[i * 4 + j + 6] = m.data[6];
data[i * 4 + j + 7] = m.data[7];
data[i * 4 + j + 8] = m.data[8];
data[i * 4 + j + 9] = m.data[9];
data[i * 4 + j + 10] = m.data[10];
data[i * 4 + j + 11] = m.data[11];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> slice_4x1(int i = 0, int j = 0) const {
Matrix<Element, 4, 1> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 4];
m.data[2] = data[i * 4 + j + 8];
m.data[3] = data[i * 4 + j + 12];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_4x1(Matrix<Element, 4, 1> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 4] = m.data[1];
data[i * 4 + j + 8] = m.data[2];
data[i * 4 + j + 12] = m.data[3];
return *this;
}
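/// Gets the j-th column as a 4-by-1 matrix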
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> column(int j) const {
return slice_4x1(0, j);
}
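/// Overwrites the j-th column with a 4-by-1 matrix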
CUTLASS_HOST_DEVICE
Matrix &set_column(Matrix<Element, 4, 1> const &v, int j = 0) {
return set_slice_4x1(v, 0, j);
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> slice_4x2(int i = 0, int j = 0) const {
Matrix<Element, 4, 2> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 4];
m.data[3] = data[i * 4 + j + 5];
m.data[4] = data[i * 4 + j + 8];
m.data[5] = data[i * 4 + j + 9];
m.data[6] = data[i * 4 + j + 12];
m.data[7] = data[i * 4 + j + 13];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_4x2(Matrix<Element, 4, 2> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 4] = m.data[2];
data[i * 4 + j + 5] = m.data[3];
data[i * 4 + j + 8] = m.data[4];
data[i * 4 + j + 9] = m.data[5];
data[i * 4 + j + 12] = m.data[6];
data[i * 4 + j + 13] = m.data[7];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> slice_4x3(int i = 0, int j = 0) const {
Matrix<Element, 4, 3> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 4];
m.data[4] = data[i * 4 + j + 5];
m.data[5] = data[i * 4 + j + 6];
m.data[6] = data[i * 4 + j + 8];
m.data[7] = data[i * 4 + j + 9];
m.data[8] = data[i * 4 + j + 10];
m.data[9] = data[i * 4 + j + 12];
m.data[10] = data[i * 4 + j + 13];
m.data[11] = data[i * 4 + j + 14];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_4x3(Matrix<Element, 4, 3> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 4] = m.data[3];
data[i * 4 + j + 5] = m.data[4];
data[i * 4 + j + 6] = m.data[5];
data[i * 4 + j + 8] = m.data[6];
data[i * 4 + j + 9] = m.data[7];
data[i * 4 + j + 10] = m.data[8];
data[i * 4 + j + 12] = m.data[9];
data[i * 4 + j + 13] = m.data[10];
data[i * 4 + j + 14] = m.data[11];
return *this;
}
/// Gets a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> slice_4x4(int i = 0, int j = 0) const {
Matrix<Element, 4, 4> m;
m.data[0] = data[i * 4 + j + 0];
m.data[1] = data[i * 4 + j + 1];
m.data[2] = data[i * 4 + j + 2];
m.data[3] = data[i * 4 + j + 3];
m.data[4] = data[i * 4 + j + 4];
m.data[5] = data[i * 4 + j + 5];
m.data[6] = data[i * 4 + j + 6];
m.data[7] = data[i * 4 + j + 7];
m.data[8] = data[i * 4 + j + 8];
m.data[9] = data[i * 4 + j + 9];
m.data[10] = data[i * 4 + j + 10];
m.data[11] = data[i * 4 + j + 11];
m.data[12] = data[i * 4 + j + 12];
m.data[13] = data[i * 4 + j + 13];
m.data[14] = data[i * 4 + j + 14];
m.data[15] = data[i * 4 + j + 15];
return m;
}
/// Overwrites a submatrix with optional offset
CUTLASS_HOST_DEVICE
Matrix & set_slice_4x4(Matrix<Element, 4, 4> const &m, int i = 0, int j = 0) {
data[i * 4 + j + 0] = m.data[0];
data[i * 4 + j + 1] = m.data[1];
data[i * 4 + j + 2] = m.data[2];
data[i * 4 + j + 3] = m.data[3];
data[i * 4 + j + 4] = m.data[4];
data[i * 4 + j + 5] = m.data[5];
data[i * 4 + j + 6] = m.data[6];
data[i * 4 + j + 7] = m.data[7];
data[i * 4 + j + 8] = m.data[8];
data[i * 4 + j + 9] = m.data[9];
data[i * 4 + j + 10] = m.data[10];
data[i * 4 + j + 11] = m.data[11];
data[i * 4 + j + 12] = m.data[12];
data[i * 4 + j + 13] = m.data[13];
data[i * 4 + j + 14] = m.data[14];
data[i * 4 + j + 15] = m.data[15];
return *this;
}
/// Forms a 4-by-4 matrix by horizontally concatenating a 4-by-1 matrix with a 4-by-3 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 4, 1> const & lhs, Matrix<Element, 4, 3> const & rhs) {
return Matrix(
lhs.at(0, 0), rhs.at(0, 0), rhs.at(0, 1), rhs.at(0, 2)
, lhs.at(1, 0), rhs.at(1, 0), rhs.at(1, 1), rhs.at(1, 2)
, lhs.at(2, 0), rhs.at(2, 0), rhs.at(2, 1), rhs.at(2, 2)
, lhs.at(3, 0), rhs.at(3, 0), rhs.at(3, 1), rhs.at(3, 2));
}
/// Forms a 4-by-4 matrix by horizontally concatenating a 4-by-2 matrix with a 4-by-2 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 4, 2> const & lhs, Matrix<Element, 4, 2> const & rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), rhs.at(0, 0), rhs.at(0, 1)
, lhs.at(1, 0), lhs.at(1, 1), rhs.at(1, 0), rhs.at(1, 1)
, lhs.at(2, 0), lhs.at(2, 1), rhs.at(2, 0), rhs.at(2, 1)
, lhs.at(3, 0), lhs.at(3, 1), rhs.at(3, 0), rhs.at(3, 1));
}
/// Forms a 4-by-4 matrix by horizontally concatenating a 4-by-3 matrix with a 4-by-1 matrix
CUTLASS_HOST_DEVICE
static Matrix hcat(Matrix<Element, 4, 3> const & lhs, Matrix<Element, 4, 1> const & rhs) {
return Matrix(
lhs.at(0, 0), lhs.at(0, 1), lhs.at(0, 2), rhs.at(0, 0)
, lhs.at(1, 0), lhs.at(1, 1), lhs.at(1, 2), rhs.at(1, 0)
, lhs.at(2, 0), lhs.at(2, 1), lhs.at(2, 2), rhs.at(2, 0)
, lhs.at(3, 0), lhs.at(3, 1), lhs.at(3, 2), rhs.at(3, 0));
}
/// Forms a 4-by-4 matrix by vertically concatenating a 1-by-4 matrix with a 3-by-4 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 1, 4> const & upper, Matrix<Element, 3, 4> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2), upper.at(0, 3)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2), lower.at(0, 3)
, lower.at(1, 0), lower.at(1, 1), lower.at(1, 2), lower.at(1, 3)
, lower.at(2, 0), lower.at(2, 1), lower.at(2, 2), lower.at(2, 3));
}
/// Forms a 4-by-4 matrix by vertically concatenating a 2-by-4 matrix with a 2-by-4 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 2, 4> const & upper, Matrix<Element, 2, 4> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2), upper.at(0, 3)
, upper.at(1, 0), upper.at(1, 1), upper.at(1, 2), upper.at(1, 3)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2), lower.at(0, 3)
, lower.at(1, 0), lower.at(1, 1), lower.at(1, 2), lower.at(1, 3));
}
/// Forms a 4-by-4 matrix by vertically concatenating a 3-by-4 matrix with a 1-by-4 matrix
CUTLASS_HOST_DEVICE
static Matrix vcat(Matrix<Element, 3, 4> const & upper, Matrix<Element, 1, 4> const & lower) {
return Matrix(
upper.at(0, 0), upper.at(0, 1), upper.at(0, 2), upper.at(0, 3)
, upper.at(1, 0), upper.at(1, 1), upper.at(1, 2), upper.at(1, 3)
, upper.at(2, 0), upper.at(2, 1), upper.at(2, 2), upper.at(2, 3)
, lower.at(0, 0), lower.at(0, 1), lower.at(0, 2), lower.at(0, 3));
}
/// Forms a 4-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Element A, Matrix<Element, 1, 3> const & B,
Matrix<Element, 3, 1> const & C, Matrix<Element, 3, 3> const & D) {
return Matrix(
A, B.at(0, 0), B.at(0, 1), B.at(0, 2)
, C.at(0, 0), D.at(0, 0), D.at(0, 1), D.at(0, 2)
, C.at(1, 0), D.at(1, 0), D.at(1, 1), D.at(1, 2)
, C.at(2, 0), D.at(2, 0), D.at(2, 1), D.at(2, 2)
);
}
/// Forms a 4-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 1, 2> const & A, Matrix<Element, 1, 2> const & B,
Matrix<Element, 3, 2> const & C, Matrix<Element, 3, 2> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B.at(0, 0), B.at(0, 1)
, C.at(0, 0), C.at(0, 1), D.at(0, 0), D.at(0, 1)
, C.at(1, 0), C.at(1, 1), D.at(1, 0), D.at(1, 1)
, C.at(2, 0), C.at(2, 1), D.at(2, 0), D.at(2, 1)
);
}
/// Forms a 4-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 1, 3> const & A, Element B,
Matrix<Element, 3, 3> const & C, Matrix<Element, 3, 1> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), A.at(0, 2), B
, C.at(0, 0), C.at(0, 1), C.at(0, 2), D.at(0, 0)
, C.at(1, 0), C.at(1, 1), C.at(1, 2), D.at(1, 0)
, C.at(2, 0), C.at(2, 1), C.at(2, 2), D.at(2, 0)
);
}
/// Forms a 4-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 1> const & A, Matrix<Element, 2, 3> const & B,
Matrix<Element, 2, 1> const & C, Matrix<Element, 2, 3> const & D) {
return Matrix(
A.at(0, 0), B.at(0, 0), B.at(0, 1), B.at(0, 2)
, A.at(1, 0), B.at(1, 0), B.at(1, 1), B.at(1, 2)
, C.at(0, 0), D.at(0, 0), D.at(0, 1), D.at(0, 2)
, C.at(1, 0), D.at(1, 0), D.at(1, 1), D.at(1, 2)
);
}
/// Forms a 4-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 2> const & A, Matrix<Element, 2, 2> const & B,
Matrix<Element, 2, 2> const & C, Matrix<Element, 2, 2> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B.at(0, 0), B.at(0, 1)
, A.at(1, 0), A.at(1, 1), B.at(1, 0), B.at(1, 1)
, C.at(0, 0), C.at(0, 1), D.at(0, 0), D.at(0, 1)
, C.at(1, 0), C.at(1, 1), D.at(1, 0), D.at(1, 1)
);
}
/// Forms a 4-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 2, 3> const & A, Matrix<Element, 2, 1> const & B,
Matrix<Element, 2, 3> const & C, Matrix<Element, 2, 1> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), A.at(0, 2), B.at(0, 0)
, A.at(1, 0), A.at(1, 1), A.at(1, 2), B.at(1, 0)
, C.at(0, 0), C.at(0, 1), C.at(0, 2), D.at(0, 0)
, C.at(1, 0), C.at(1, 1), C.at(1, 2), D.at(1, 0)
);
}
/// Forms a 4-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 3, 1> const & A, Matrix<Element, 3, 3> const & B,
Element C, Matrix<Element, 1, 3> const & D) {
return Matrix(
A.at(0, 0), B.at(0, 0), B.at(0, 1), B.at(0, 2)
, A.at(1, 0), B.at(1, 0), B.at(1, 1), B.at(1, 2)
, A.at(2, 0), B.at(2, 0), B.at(2, 1), B.at(2, 2)
, C, D.at(0, 0), D.at(0, 1), D.at(0, 2)
);
}
/// Forms a 4-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 3, 2> const & A, Matrix<Element, 3, 2> const & B,
Matrix<Element, 1, 2> const & C, Matrix<Element, 1, 2> const & D) {
return Matrix(
A.at(0, 0), A.at(0, 1), B.at(0, 0), B.at(0, 1)
, A.at(1, 0), A.at(1, 1), B.at(1, 0), B.at(1, 1)
, A.at(2, 0), A.at(2, 1), B.at(2, 0), B.at(2, 1)
, C.at(0, 0), C.at(0, 1), D.at(0, 0), D.at(0, 1)
);
}
/// Forms a 4-by-4 matrix by concatenating four components
CUTLASS_HOST_DEVICE
static Matrix block(
Matrix<Element, 3, 3> const & A, Matrix<Element, 3, 1> const & B,
Matrix<Element, 1, 3> const & C, Element D) {
return Matrix(
A.at(0, 0), A.at(0, 1), A.at(0, 2), B.at(0, 0)
, A.at(1, 0), A.at(1, 1), A.at(1, 2), B.at(1, 0)
, A.at(2, 0), A.at(2, 1), A.at(2, 2), B.at(2, 0)
, C.at(0, 0), C.at(0, 1), C.at(0, 2), D
);
}
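//
// Example sketch (assuming Element = float): the slice_*() accessors and the
// block() overloads are inverses of one another, so a matrix can be taken
// apart into conforming sub-blocks and reassembled. The 2-by-2 partition below
// is the same one used by inverse() further down.
//
//   cutlass::Matrix4x4<float> M = cutlass::Matrix4x4<float>::identity();
//   cutlass::Matrix<float, 2, 2> A = M.slice_2x2(0, 0);   // upper-left block
//   cutlass::Matrix<float, 2, 2> B = M.slice_2x2(0, 2);   // upper-right block
//   cutlass::Matrix<float, 2, 2> C = M.slice_2x2(2, 0);   // lower-left block
//   cutlass::Matrix<float, 2, 2> D = M.slice_2x2(2, 2);   // lower-right block
//   cutlass::Matrix4x4<float> N = cutlass::Matrix4x4<float>::block(A, B, C, D);
//   // N reproduces M element for element.
//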
/// Elementwise add operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix add(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] + rhs.data[0];
result.data[1] = data[1] + rhs.data[1];
result.data[2] = data[2] + rhs.data[2];
result.data[3] = data[3] + rhs.data[3];
result.data[4] = data[4] + rhs.data[4];
result.data[5] = data[5] + rhs.data[5];
result.data[6] = data[6] + rhs.data[6];
result.data[7] = data[7] + rhs.data[7];
result.data[8] = data[8] + rhs.data[8];
result.data[9] = data[9] + rhs.data[9];
result.data[10] = data[10] + rhs.data[10];
result.data[11] = data[11] + rhs.data[11];
result.data[12] = data[12] + rhs.data[12];
result.data[13] = data[13] + rhs.data[13];
result.data[14] = data[14] + rhs.data[14];
result.data[15] = data[15] + rhs.data[15];
return result;
}
/// Elementwise add operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix operator +(Matrix const &rhs) const {
return add(rhs);
}
/// Elementwise add operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator +=(Matrix const &rhs) {
data[0] += rhs.data[0];
data[1] += rhs.data[1];
data[2] += rhs.data[2];
data[3] += rhs.data[3];
data[4] += rhs.data[4];
data[5] += rhs.data[5];
data[6] += rhs.data[6];
data[7] += rhs.data[7];
data[8] += rhs.data[8];
data[9] += rhs.data[9];
data[10] += rhs.data[10];
data[11] += rhs.data[11];
data[12] += rhs.data[12];
data[13] += rhs.data[13];
data[14] += rhs.data[14];
data[15] += rhs.data[15];
return *this;
}
/// Elementwise subtract operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix subtract(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] - rhs.data[0];
result.data[1] = data[1] - rhs.data[1];
result.data[2] = data[2] - rhs.data[2];
result.data[3] = data[3] - rhs.data[3];
result.data[4] = data[4] - rhs.data[4];
result.data[5] = data[5] - rhs.data[5];
result.data[6] = data[6] - rhs.data[6];
result.data[7] = data[7] - rhs.data[7];
result.data[8] = data[8] - rhs.data[8];
result.data[9] = data[9] - rhs.data[9];
result.data[10] = data[10] - rhs.data[10];
result.data[11] = data[11] - rhs.data[11];
result.data[12] = data[12] - rhs.data[12];
result.data[13] = data[13] - rhs.data[13];
result.data[14] = data[14] - rhs.data[14];
result.data[15] = data[15] - rhs.data[15];
return result;
}
/// Elementwise subtract operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix operator -(Matrix const &rhs) const {
return subtract(rhs);
}
/// Elementwise subtract operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator -=(Matrix const &rhs) {
data[0] -= rhs.data[0];
data[1] -= rhs.data[1];
data[2] -= rhs.data[2];
data[3] -= rhs.data[3];
data[4] -= rhs.data[4];
data[5] -= rhs.data[5];
data[6] -= rhs.data[6];
data[7] -= rhs.data[7];
data[8] -= rhs.data[8];
data[9] -= rhs.data[9];
data[10] -= rhs.data[10];
data[11] -= rhs.data[11];
data[12] -= rhs.data[12];
data[13] -= rhs.data[13];
data[14] -= rhs.data[14];
data[15] -= rhs.data[15];
return *this;
}
/// Elementwise multiply operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix multiply(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] * rhs.data[0];
result.data[1] = data[1] * rhs.data[1];
result.data[2] = data[2] * rhs.data[2];
result.data[3] = data[3] * rhs.data[3];
result.data[4] = data[4] * rhs.data[4];
result.data[5] = data[5] * rhs.data[5];
result.data[6] = data[6] * rhs.data[6];
result.data[7] = data[7] * rhs.data[7];
result.data[8] = data[8] * rhs.data[8];
result.data[9] = data[9] * rhs.data[9];
result.data[10] = data[10] * rhs.data[10];
result.data[11] = data[11] * rhs.data[11];
result.data[12] = data[12] * rhs.data[12];
result.data[13] = data[13] * rhs.data[13];
result.data[14] = data[14] * rhs.data[14];
result.data[15] = data[15] * rhs.data[15];
return result;
}
/// Scalar multiply operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix multiply(Element const &s) const {
Matrix result;
result.data[0] = data[0] * s;
result.data[1] = data[1] * s;
result.data[2] = data[2] * s;
result.data[3] = data[3] * s;
result.data[4] = data[4] * s;
result.data[5] = data[5] * s;
result.data[6] = data[6] * s;
result.data[7] = data[7] * s;
result.data[8] = data[8] * s;
result.data[9] = data[9] * s;
result.data[10] = data[10] * s;
result.data[11] = data[11] * s;
result.data[12] = data[12] * s;
result.data[13] = data[13] * s;
result.data[14] = data[14] * s;
result.data[15] = data[15] * s;
return result;
}
/// Scalar multiply operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix operator *(Element const &s) const {
return multiply(s);
}
/// Scalar multiply operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator *=(Element const &s) {
data[0] *= s;
data[1] *= s;
data[2] *= s;
data[3] *= s;
data[4] *= s;
data[5] *= s;
data[6] *= s;
data[7] *= s;
data[8] *= s;
data[9] *= s;
data[10] *= s;
data[11] *= s;
data[12] *= s;
data[13] *= s;
data[14] *= s;
data[15] *= s;
return *this;
}
/// Elementwise divide operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix divide(Matrix const &rhs) const {
Matrix result;
result.data[0] = data[0] / rhs.data[0];
result.data[1] = data[1] / rhs.data[1];
result.data[2] = data[2] / rhs.data[2];
result.data[3] = data[3] / rhs.data[3];
result.data[4] = data[4] / rhs.data[4];
result.data[5] = data[5] / rhs.data[5];
result.data[6] = data[6] / rhs.data[6];
result.data[7] = data[7] / rhs.data[7];
result.data[8] = data[8] / rhs.data[8];
result.data[9] = data[9] / rhs.data[9];
result.data[10] = data[10] / rhs.data[10];
result.data[11] = data[11] / rhs.data[11];
result.data[12] = data[12] / rhs.data[12];
result.data[13] = data[13] / rhs.data[13];
result.data[14] = data[14] / rhs.data[14];
result.data[15] = data[15] / rhs.data[15];
return result;
}
/// Scalar divide operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix divide(Element const &s) const {
Matrix result;
result.data[0] = data[0] / s;
result.data[1] = data[1] / s;
result.data[2] = data[2] / s;
result.data[3] = data[3] / s;
result.data[4] = data[4] / s;
result.data[5] = data[5] / s;
result.data[6] = data[6] / s;
result.data[7] = data[7] / s;
result.data[8] = data[8] / s;
result.data[9] = data[9] / s;
result.data[10] = data[10] / s;
result.data[11] = data[11] / s;
result.data[12] = data[12] / s;
result.data[13] = data[13] / s;
result.data[14] = data[14] / s;
result.data[15] = data[15] / s;
return result;
}
/// Scalar divide operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix operator /(Element const &s) const {
return divide(s);
}
/// Scalar divide operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Element const &s) {
data[0] /= s;
data[1] /= s;
data[2] /= s;
data[3] /= s;
data[4] /= s;
data[5] /= s;
data[6] /= s;
data[7] /= s;
data[8] /= s;
data[9] /= s;
data[10] /= s;
data[11] /= s;
data[12] /= s;
data[13] /= s;
data[14] /= s;
data[15] /= s;
return *this;
}
/// Elementwise divide operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix operator /(Matrix const &rhs) const {
return divide(rhs);
}
/// Elementwise divide operator (4-by-4)
CUTLASS_HOST_DEVICE
Matrix & operator /=(Matrix const &rhs) {
data[0] /= rhs.data[0];
data[1] /= rhs.data[1];
data[2] /= rhs.data[2];
data[3] /= rhs.data[3];
data[4] /= rhs.data[4];
data[5] /= rhs.data[5];
data[6] /= rhs.data[6];
data[7] /= rhs.data[7];
data[8] /= rhs.data[8];
data[9] /= rhs.data[9];
data[10] /= rhs.data[10];
data[11] /= rhs.data[11];
data[12] /= rhs.data[12];
data[13] /= rhs.data[13];
data[14] /= rhs.data[14];
data[15] /= rhs.data[15];
return *this;
}
/// Negates each element of the matrix
CUTLASS_HOST_DEVICE
Matrix operator-() const {
Matrix m;
m.data[0] = -data[0];
m.data[1] = -data[1];
m.data[2] = -data[2];
m.data[3] = -data[3];
m.data[4] = -data[4];
m.data[5] = -data[5];
m.data[6] = -data[6];
m.data[7] = -data[7];
m.data[8] = -data[8];
m.data[9] = -data[9];
m.data[10] = -data[10];
m.data[11] = -data[11];
m.data[12] = -data[12];
m.data[13] = -data[13];
m.data[14] = -data[14];
m.data[15] = -data[15];
return m;
}
/// Matrix product of size 4-by-1-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> product(
Matrix<Element, 4, 1> const &rhs,
Matrix<Element, 4, 1> accum = Matrix<Element, 4, 1>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[4] * rhs.data[0];
accum.data[2] += data[8] * rhs.data[0];
accum.data[3] += data[12] * rhs.data[0];
// k=1
accum.data[0] += data[1] * rhs.data[1];
accum.data[1] += data[5] * rhs.data[1];
accum.data[2] += data[9] * rhs.data[1];
accum.data[3] += data[13] * rhs.data[1];
// k=2
accum.data[0] += data[2] * rhs.data[2];
accum.data[1] += data[6] * rhs.data[2];
accum.data[2] += data[10] * rhs.data[2];
accum.data[3] += data[14] * rhs.data[2];
// k=3
accum.data[0] += data[3] * rhs.data[3];
accum.data[1] += data[7] * rhs.data[3];
accum.data[2] += data[11] * rhs.data[3];
accum.data[3] += data[15] * rhs.data[3];
return accum;
}
/// Matrix product of size 4-by-1-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 1> operator*(Matrix<Element, 4, 1> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-2-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> product(
Matrix<Element, 4, 2> const &rhs,
Matrix<Element, 4, 2> accum = Matrix<Element, 4, 2>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[4] * rhs.data[0];
accum.data[3] += data[4] * rhs.data[1];
accum.data[4] += data[8] * rhs.data[0];
accum.data[5] += data[8] * rhs.data[1];
accum.data[6] += data[12] * rhs.data[0];
accum.data[7] += data[12] * rhs.data[1];
// k=1
accum.data[0] += data[1] * rhs.data[2];
accum.data[1] += data[1] * rhs.data[3];
accum.data[2] += data[5] * rhs.data[2];
accum.data[3] += data[5] * rhs.data[3];
accum.data[4] += data[9] * rhs.data[2];
accum.data[5] += data[9] * rhs.data[3];
accum.data[6] += data[13] * rhs.data[2];
accum.data[7] += data[13] * rhs.data[3];
// k=2
accum.data[0] += data[2] * rhs.data[4];
accum.data[1] += data[2] * rhs.data[5];
accum.data[2] += data[6] * rhs.data[4];
accum.data[3] += data[6] * rhs.data[5];
accum.data[4] += data[10] * rhs.data[4];
accum.data[5] += data[10] * rhs.data[5];
accum.data[6] += data[14] * rhs.data[4];
accum.data[7] += data[14] * rhs.data[5];
// k=3
accum.data[0] += data[3] * rhs.data[6];
accum.data[1] += data[3] * rhs.data[7];
accum.data[2] += data[7] * rhs.data[6];
accum.data[3] += data[7] * rhs.data[7];
accum.data[4] += data[11] * rhs.data[6];
accum.data[5] += data[11] * rhs.data[7];
accum.data[6] += data[15] * rhs.data[6];
accum.data[7] += data[15] * rhs.data[7];
return accum;
}
/// Matrix product of size 4-by-2-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 2> operator*(Matrix<Element, 4, 2> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-3-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> product(
Matrix<Element, 4, 3> const &rhs,
Matrix<Element, 4, 3> accum = Matrix<Element, 4, 3>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[4] * rhs.data[0];
accum.data[4] += data[4] * rhs.data[1];
accum.data[5] += data[4] * rhs.data[2];
accum.data[6] += data[8] * rhs.data[0];
accum.data[7] += data[8] * rhs.data[1];
accum.data[8] += data[8] * rhs.data[2];
accum.data[9] += data[12] * rhs.data[0];
accum.data[10] += data[12] * rhs.data[1];
accum.data[11] += data[12] * rhs.data[2];
// k=1
accum.data[0] += data[1] * rhs.data[3];
accum.data[1] += data[1] * rhs.data[4];
accum.data[2] += data[1] * rhs.data[5];
accum.data[3] += data[5] * rhs.data[3];
accum.data[4] += data[5] * rhs.data[4];
accum.data[5] += data[5] * rhs.data[5];
accum.data[6] += data[9] * rhs.data[3];
accum.data[7] += data[9] * rhs.data[4];
accum.data[8] += data[9] * rhs.data[5];
accum.data[9] += data[13] * rhs.data[3];
accum.data[10] += data[13] * rhs.data[4];
accum.data[11] += data[13] * rhs.data[5];
// k=2
accum.data[0] += data[2] * rhs.data[6];
accum.data[1] += data[2] * rhs.data[7];
accum.data[2] += data[2] * rhs.data[8];
accum.data[3] += data[6] * rhs.data[6];
accum.data[4] += data[6] * rhs.data[7];
accum.data[5] += data[6] * rhs.data[8];
accum.data[6] += data[10] * rhs.data[6];
accum.data[7] += data[10] * rhs.data[7];
accum.data[8] += data[10] * rhs.data[8];
accum.data[9] += data[14] * rhs.data[6];
accum.data[10] += data[14] * rhs.data[7];
accum.data[11] += data[14] * rhs.data[8];
// k=3
accum.data[0] += data[3] * rhs.data[9];
accum.data[1] += data[3] * rhs.data[10];
accum.data[2] += data[3] * rhs.data[11];
accum.data[3] += data[7] * rhs.data[9];
accum.data[4] += data[7] * rhs.data[10];
accum.data[5] += data[7] * rhs.data[11];
accum.data[6] += data[11] * rhs.data[9];
accum.data[7] += data[11] * rhs.data[10];
accum.data[8] += data[11] * rhs.data[11];
accum.data[9] += data[15] * rhs.data[9];
accum.data[10] += data[15] * rhs.data[10];
accum.data[11] += data[15] * rhs.data[11];
return accum;
}
/// Matrix product of size 4-by-3-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 3> operator*(Matrix<Element, 4, 3> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> product(
Matrix<Element, 4, 4> const &rhs,
Matrix<Element, 4, 4> accum = Matrix<Element, 4, 4>()
) const {
// k=0
accum.data[0] += data[0] * rhs.data[0];
accum.data[1] += data[0] * rhs.data[1];
accum.data[2] += data[0] * rhs.data[2];
accum.data[3] += data[0] * rhs.data[3];
accum.data[4] += data[4] * rhs.data[0];
accum.data[5] += data[4] * rhs.data[1];
accum.data[6] += data[4] * rhs.data[2];
accum.data[7] += data[4] * rhs.data[3];
accum.data[8] += data[8] * rhs.data[0];
accum.data[9] += data[8] * rhs.data[1];
accum.data[10] += data[8] * rhs.data[2];
accum.data[11] += data[8] * rhs.data[3];
accum.data[12] += data[12] * rhs.data[0];
accum.data[13] += data[12] * rhs.data[1];
accum.data[14] += data[12] * rhs.data[2];
accum.data[15] += data[12] * rhs.data[3];
// k=1
accum.data[0] += data[1] * rhs.data[4];
accum.data[1] += data[1] * rhs.data[5];
accum.data[2] += data[1] * rhs.data[6];
accum.data[3] += data[1] * rhs.data[7];
accum.data[4] += data[5] * rhs.data[4];
accum.data[5] += data[5] * rhs.data[5];
accum.data[6] += data[5] * rhs.data[6];
accum.data[7] += data[5] * rhs.data[7];
accum.data[8] += data[9] * rhs.data[4];
accum.data[9] += data[9] * rhs.data[5];
accum.data[10] += data[9] * rhs.data[6];
accum.data[11] += data[9] * rhs.data[7];
accum.data[12] += data[13] * rhs.data[4];
accum.data[13] += data[13] * rhs.data[5];
accum.data[14] += data[13] * rhs.data[6];
accum.data[15] += data[13] * rhs.data[7];
// k=2
accum.data[0] += data[2] * rhs.data[8];
accum.data[1] += data[2] * rhs.data[9];
accum.data[2] += data[2] * rhs.data[10];
accum.data[3] += data[2] * rhs.data[11];
accum.data[4] += data[6] * rhs.data[8];
accum.data[5] += data[6] * rhs.data[9];
accum.data[6] += data[6] * rhs.data[10];
accum.data[7] += data[6] * rhs.data[11];
accum.data[8] += data[10] * rhs.data[8];
accum.data[9] += data[10] * rhs.data[9];
accum.data[10] += data[10] * rhs.data[10];
accum.data[11] += data[10] * rhs.data[11];
accum.data[12] += data[14] * rhs.data[8];
accum.data[13] += data[14] * rhs.data[9];
accum.data[14] += data[14] * rhs.data[10];
accum.data[15] += data[14] * rhs.data[11];
// k=3
accum.data[0] += data[3] * rhs.data[12];
accum.data[1] += data[3] * rhs.data[13];
accum.data[2] += data[3] * rhs.data[14];
accum.data[3] += data[3] * rhs.data[15];
accum.data[4] += data[7] * rhs.data[12];
accum.data[5] += data[7] * rhs.data[13];
accum.data[6] += data[7] * rhs.data[14];
accum.data[7] += data[7] * rhs.data[15];
accum.data[8] += data[11] * rhs.data[12];
accum.data[9] += data[11] * rhs.data[13];
accum.data[10] += data[11] * rhs.data[14];
accum.data[11] += data[11] * rhs.data[15];
accum.data[12] += data[15] * rhs.data[12];
accum.data[13] += data[15] * rhs.data[13];
accum.data[14] += data[15] * rhs.data[14];
accum.data[15] += data[15] * rhs.data[15];
return accum;
}
/// Matrix product of size 4-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix<Element, 4, 4> operator*(Matrix<Element, 4, 4> const &rhs) const {
return product(rhs);
}
/// Matrix product of size 4-by-4-by-4
CUTLASS_HOST_DEVICE
Matrix & operator*=(Matrix<Element, 4, 4> const &rhs) {
*this = product(rhs);
return *this;
}
/// Returns the sum of elements
CUTLASS_HOST_DEVICE
Element sum(Element accum = Element()) const {
accum += data[0];
accum += data[1];
accum += data[2];
accum += data[3];
accum += data[4];
accum += data[5];
accum += data[6];
accum += data[7];
accum += data[8];
accum += data[9];
accum += data[10];
accum += data[11];
accum += data[12];
accum += data[13];
accum += data[14];
accum += data[15];
return accum;
}
/// Returns the sum of squared elements
CUTLASS_HOST_DEVICE
Element norm(Element accum = Element()) const {
accum += data[0] * data[0];
accum += data[1] * data[1];
accum += data[2] * data[2];
accum += data[3] * data[3];
accum += data[4] * data[4];
accum += data[5] * data[5];
accum += data[6] * data[6];
accum += data[7] * data[7];
accum += data[8] * data[8];
accum += data[9] * data[9];
accum += data[10] * data[10];
accum += data[11] * data[11];
accum += data[12] * data[12];
accum += data[13] * data[13];
accum += data[14] * data[14];
accum += data[15] * data[15];
return accum;
}
/// Returns square root of the norm
CUTLASS_HOST_DEVICE
Element magnitude() const {
return fast_sqrt(norm());
}
/// Returns the sum of diagonal elements
CUTLASS_HOST_DEVICE
Element trace(Element accum = Element()) const {
accum += data[0];
accum += data[5];
accum += data[10];
accum += data[15];
return accum;
}
/// Returns 4-by-4 rotation matrix around the X axis
CUTLASS_HOST_DEVICE
static Matrix rotation_X(Element theta) {
Matrix m = identity();
Element c = fast_cos(theta);
Element s = fast_sin(theta);
m.at(1, 1) = c;
m.at(1, 2) = -s;
m.at(2, 1) = s;
m.at(2, 2) = c;
return m;
}
/// Returns 4-by-4 rotation matrix around the Y axis
CUTLASS_HOST_DEVICE
static Matrix rotation_Y(Element theta) {
Matrix m = identity();
Element c = fast_cos(theta);
Element s = fast_sin(theta);
m.at(0, 0) = c;
m.at(2, 0) = -s;
m.at(0, 2) = s;
m.at(2, 2) = c;
return m;
}
/// Returns 4-by-4 rotation matrix around the Z axis
CUTLASS_HOST_DEVICE
static Matrix rotation_Z(Element theta) {
Matrix m = Matrix::identity();
Element c = fast_cos(theta);
Element s = fast_sin(theta);
m.at(0, 0) = c;
m.at(0, 1) = -s;
m.at(1, 0) = s;
m.at(1, 1) = c;
return m;
}
/// Returns a 4-by-4 rotation matrix around a unit-length axis
CUTLASS_HOST_DEVICE
static Matrix rotation(Element theta, Matrix<Element, 3, 1> const &u) {
Element x = u.data[0];
Element y = u.data[1];
Element z = u.data[2];
Element c = fast_cos(theta);
Element s = fast_sin(theta);
Element one_minus_cos = Element(1) - fast_cos(theta);
Matrix m = Matrix::identity();
m.set_slice_3x3({
c + x * x * one_minus_cos, x * y * one_minus_cos - z * s, x * z * one_minus_cos + y * s,
y * x * one_minus_cos + z * s, c + y * y * one_minus_cos, y * z * one_minus_cos - x * s,
z * x * one_minus_cos - y * s, z * y * one_minus_cos + x * s, c + z * z * one_minus_cos
});
return m;
}
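//
// Note: the 3-by-3 block written by rotation() above is Rodrigues' rotation
// formula R = c*I + s*[u]_x + (1 - c)*u*u^T, where [u]_x is the skew-symmetric
// cross-product matrix of the unit axis u. Sketch of a sanity check (assuming
// Element = float):
//
//   float theta = 0.5f;
//   cutlass::Matrix<float, 3, 1> z_axis;       // zero-initialized
//   z_axis.data[2] = 1.f;                      // u = (0, 0, 1)
//   auto R  = cutlass::Matrix4x4<float>::rotation(theta, z_axis);
//   auto Rz = cutlass::Matrix4x4<float>::rotation_Z(theta);
//   // R.slice_3x3() and Rz.slice_3x3() agree for any theta.
//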
/// Returns a 4-by-4 reflection about the plane specified by the
/// unit-length normal vector n_unit
CUTLASS_HOST_DEVICE
static Matrix reflection(Matrix<Element, 3, 1> const &n_unit) {
Element a = n_unit.data[0];
Element b = n_unit.data[1];
Element c = n_unit.data[2];
Matrix m = Matrix::identity();
m.set_slice_3x3({
Element(1) - Element(2) * a * a, Element(-2) * a * b, Element(-2) * a * c,
Element(-2) * a * b, Element(1) - Element(2) * b * b, Element(-2) * b * c,
Element(-2) * a * c, Element(-2) * b * c, Element(1) - Element(2) * c * c
});
return m;
}
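//
// Note: the 3-by-3 block written by reflection() above is the Householder
// matrix I - 2*n*n^T for the unit plane normal n = (a, b, c); reflecting twice
// restores the original vector, so the 3-by-3 block of
// reflection(n) * reflection(n) is the identity.
//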
/// Returns a perspective projection matrix typical of OpenGL applications
CUTLASS_HOST_DEVICE
static Matrix perspective(Element near_plane, Element far_plane, Element fovH, Element fovV) {
Element aspect = fovH / fovV;
Element f = Element(cos(fovV)) / Element(fovH);
Element Q = near_plane - far_plane;
return Matrix(
f / aspect, 0, 0, 0,
0, f, 0, 0,
0, 0, (near_plane + far_plane) / Q, Element(2) * far_plane * near_plane / Q,
0, 0, -1, 0
);
}
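/// Returns a 4-by-4 matrix that translates homogeneous points by the 3-vector v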
CUTLASS_HOST_DEVICE
static Matrix translation(Matrix<Element, 3, 1> const &v) {
return Matrix(
1, 0, 0, v.data[0],
0, 1, 0, v.data[1],
0, 0, 1, v.data[2],
0, 0, 0, 1
);
}
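//
// Example sketch (assuming Element = float): translation() and the rotation
// helpers compose through operator* into affine transforms that act on
// homogeneous column vectors (x, y, z, 1).
//
//   cutlass::Matrix<float, 3, 1> offset;       // zero-initialized
//   offset.data[0] = 1.f; offset.data[1] = 2.f; offset.data[2] = 3.f;
//
//   cutlass::Matrix4x4<float> T = cutlass::Matrix4x4<float>::translation(offset);
//   cutlass::Matrix4x4<float> R = cutlass::Matrix4x4<float>::rotation_Z(0.25f);
//   cutlass::Matrix4x4<float> M = T * R;       // rotate about Z, then translate
//
//   cutlass::Matrix<float, 4, 1> p;            // homogeneous origin
//   p.data[3] = 1.f;                           // p = (0, 0, 0, 1)
//   cutlass::Matrix<float, 4, 1> q = M * p;    // q = (1, 2, 3, 1)
//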
/// Computes the determinant of a 4-by-4 matrix
CUTLASS_HOST_DEVICE
Element determinant(Element accum = Element()) const {
accum += at(0, 0) * Matrix<Element, 3, 3>({ at(1, 1), at(1, 2), at(1, 3), at(2, 1), at(2, 2), at(2, 3), at(3, 1), at(3, 2), at(3, 3) }).determinant();
accum -= at(0, 1) * Matrix<Element, 3, 3>({ at(1, 0), at(1, 2), at(1, 3), at(2, 0), at(2, 2), at(2, 3), at(3, 0), at(3, 2), at(3, 3) }).determinant();
accum += at(0, 2) * Matrix<Element, 3, 3>({ at(1, 0), at(1, 1), at(1, 3), at(2, 0), at(2, 1), at(2, 3), at(3, 0), at(3, 1), at(3, 3) }).determinant();
accum -= at(0, 3) * Matrix<Element, 3, 3>({ at(1, 0), at(1, 1), at(1, 2), at(2, 0), at(2, 1), at(2, 2), at(3, 0), at(3, 1), at(3, 2) }).determinant();
return accum;
}
/// Computes the inverse of a 4-by-4 matrix (ignores the optional argument)
CUTLASS_HOST_DEVICE
Matrix inverse(Element ignore = 1) const {
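    // Blockwise inversion: partition the matrix into 2-by-2 blocks [A B; C D].
    // With E = (A - B D^-1 C)^-1, the inverse of the Schur complement of D,
    // the inverse is [E, -E B D^-1; -D^-1 C E, D^-1 + D^-1 C E B D^-1].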
Matrix<Element, 2, 2> B = slice_2x2(0, 2);
Matrix<Element, 2, 2> A = slice_2x2(0, 0);
Matrix<Element, 2, 2> C = slice_2x2(2, 0);
Matrix<Element, 2, 2> D = slice_2x2(2, 2);
Matrix<Element, 2, 2> D_inv = D.inverse();
Matrix<Element, 2, 2> E = (A - B * D_inv * C).inverse();
return Matrix::block(
E, -E * B * D_inv,
-D_inv * C * E, D_inv + D_inv * C * E * B * D_inv
);
}
};
/// Template alias for 4-by-4 matrix
template <typename Element>
using Matrix4x4 = Matrix<Element, 4, 4>;
/// Free function to infer element type from template arguments
template <typename Element>
CUTLASS_HOST_DEVICE Matrix4x4<Element> make_Matrix4x4(
Element _0_0, Element _0_1, Element _0_2, Element _0_3,
Element _1_0, Element _1_1, Element _1_2, Element _1_3,
Element _2_0, Element _2_1, Element _2_2, Element _2_3,
Element _3_0, Element _3_1, Element _3_2, Element _3_3
) {
return Matrix4x4<Element>(
_0_0, _0_1, _0_2, _0_3,
_1_0, _1_1, _1_2, _1_3,
_2_0, _2_1, _2_2, _2_3,
_3_0, _3_1, _3_2, _3_3
);
}
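// Example usage (an illustrative sketch only; it assumes the Matrix<Element, 3, 1>
// helpers such as make_Matrix3x1 defined earlier in this header):
//
//   Matrix4x4<float> T = Matrix4x4<float>::translation(make_Matrix3x1(1.0f, 2.0f, 3.0f));
//   Matrix4x4<float> R = Matrix4x4<float>::rotation_Z(0.5f);
//   Matrix4x4<float> M = T * R;   // rotate first, then translate (column-vector convention)
//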
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Elementwise scalar multiplication
template <typename Element, int Rows, int Columns>
CUTLASS_HOST_DEVICE
Matrix<Element, Rows, Columns> operator*(Element s, Matrix<Element, Rows, Columns> const &rhs) {
return rhs.multiply(s);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 364,115 | C | 24.769002 | 154 | 0.556217 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/pitch_linear_coord.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines layout functions used by TensorRef and derived classes for pitch-linear memory.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Template defining a shape used by pitch-linear operators
template <
int Contiguous,
int Strided
>
struct PitchLinearShape {
static int const kContiguous = Contiguous;
static int const kStrided = Strided;
static int const kCount = Contiguous * Strided;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Coordinate in pitch-linear space
struct PitchLinearCoord : public Coord<2, int> {
public:
/// Integer-valued index
using Index = int;
/// Base type is a Coord of rank=2
using Base = Coord<2, Index>;
/// Long integer type
using LongIndex = typename Base::LongIndex;
private:
/// Rows dimension
static int const kContiguous = 0;
/// Columns dimension
static int const kStrided = 1;
public:
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
PitchLinearCoord() { }
/// Constructs from Coord<2>
CUTLASS_HOST_DEVICE
PitchLinearCoord(Coord<2, Index> const &coord): Base(coord) { }
/// Helper to construct from a row and column
CUTLASS_HOST_DEVICE
PitchLinearCoord(Index contiguous_, Index strided_): Base(make_Coord(contiguous_, strided_)) { }
/// Helper to construct from a row and column based on LongIndex
CUTLASS_HOST_DEVICE
PitchLinearCoord(LongIndex contiguous_, LongIndex strided_)
: Base(make_Coord(Index(contiguous_), Index(strided_))) { }
/// Returns the contiguous dimension
CUTLASS_HOST_DEVICE
Index const & contiguous() const { return this->at(kContiguous); }
/// Returns the contiguous dimension
CUTLASS_HOST_DEVICE
Index & contiguous() { return this->at(kContiguous); }
/// Returns the column of the coordinate
CUTLASS_HOST_DEVICE
Index const & strided() const { return this->at(kStrided); }
/// Returns the column of the coordinate
CUTLASS_HOST_DEVICE
Index & strided() { return this->at(kStrided); }
//
// Coord operators
//
/// Element-wise addition
CUTLASS_HOST_DEVICE
PitchLinearCoord operator+(Base const& b) const {
return PitchLinearCoord(Base::operator+(b));
}
/// Element-wise subtraction
CUTLASS_HOST_DEVICE
PitchLinearCoord operator-(Base const& b) const {
return PitchLinearCoord(Base::operator-(b));
}
CUTLASS_HOST_DEVICE
PitchLinearCoord operator-() const {
return PitchLinearCoord(-at(0), -at(1));
}
/// Element-wise multiplication
CUTLASS_HOST_DEVICE
PitchLinearCoord operator*(Base const& b) const {
return PitchLinearCoord(Base::operator*(b));
}
/// Element-wise division
CUTLASS_HOST_DEVICE
PitchLinearCoord operator/(Base const& b) const {
return PitchLinearCoord(Base::operator/(b));
}
/// In-place addition
CUTLASS_HOST_DEVICE
PitchLinearCoord& operator+=(Base const& b) {
Base::operator+=(b);
return *this;
}
/// In-place subtraction
CUTLASS_HOST_DEVICE
PitchLinearCoord& operator-=(Base const& b) {
Base::operator-=(b);
return *this;
}
/// In-place multiplication
CUTLASS_HOST_DEVICE
PitchLinearCoord& operator*=(Base const& b) {
Base::operator*=(b);
return *this;
}
/// In-place division
CUTLASS_HOST_DEVICE
PitchLinearCoord& operator/=(Base const& b) {
Base::operator/=(b);
return *this;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| 5,492 | C | 29.181319 | 100 | 0.652586 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/tfloat32.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Defines a proxy class for storing Tensor Float 32 data type.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include "cutlass/floating_point_nvrtc.h"
#else
#include <cmath>
#include <limits>
#include <cstdint>
#endif
#include "cutlass/cutlass.h"
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Tensor Float 32 data type
struct alignas(4) tfloat32_t {
//
// Data members
//
/// Storage type
uint32_t storage;
//
// Methods
//
/// Constructs from an unsigned int
CUTLASS_HOST_DEVICE
static tfloat32_t bitcast(uint32_t x) {
tfloat32_t h;
h.storage = x;
return h;
}
  /// Rounds by adding half a ulp of the retained 10-bit mantissa; the discarded low
  /// bits are cleared on conversion back to float. This emulated rounding is fast in
  /// device code.
CUTLASS_HOST_DEVICE
static tfloat32_t round_half_ulp_truncate(float const &s) {
uint32_t x = reinterpret_cast<uint32_t const &>(s);
#if defined(__CUDA_ARCH__)
if (::isfinite(s)) {
x += 0x1000u;
}
#else
if (std::isfinite(s)) {
x += 0x1000u;
}
#endif
return tfloat32_t::bitcast(x);
}
/// Default constructor
tfloat32_t() = default;
/// Floating-point conversion - round toward nearest even
CUTLASS_HOST_DEVICE
// explicit tfloat32_t(float x): storage(round_half_ulp_truncate(x).storage) { }
tfloat32_t(float x): storage(round_half_ulp_truncate(x).storage) { }
/// Floating-point conversion - round toward nearest even
CUTLASS_HOST_DEVICE
// explicit tfloat32_t(double x): tfloat32_t(float(x)) {
tfloat32_t(double x): tfloat32_t(float(x)) {
}
/// Integer conversion - round toward zero
CUTLASS_HOST_DEVICE
// explicit tfloat32_t(int x) {
tfloat32_t(int x) {
float flt = static_cast<float>(x);
#if defined(__CUDA_ARCH__)
storage = reinterpret_cast<uint32_t const &>(flt);
#else
std::memcpy(&storage, &flt, sizeof(storage));
#endif
}
/// Converts to float
CUTLASS_HOST_DEVICE
operator float() const {
    // Conversion to IEEE single precision requires clearing the don't-care bits
    // of the mantissa.
unsigned bits = (storage & ~0x1fffu);
#if defined(__CUDA_ARCH__)
return reinterpret_cast<float const &>(bits);
#else
float flt;
std::memcpy(&flt, &bits, sizeof(flt));
return flt;
#endif
}
/// Converts to float
CUTLASS_HOST_DEVICE
explicit operator double() const {
return double(float(*this));
}
/// Converts to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
return int(float(*this));
}
/// Casts to bool
CUTLASS_HOST_DEVICE
explicit operator bool() const {
return (float(*this) != 0.0f);
}
/// Obtains raw bits
CUTLASS_HOST_DEVICE
uint32_t raw() const {
return storage;
}
/// Returns the sign bit
CUTLASS_HOST_DEVICE
bool signbit() const {
return ((raw() & 0x80000000) != 0);
}
/// Returns the biased exponent
CUTLASS_HOST_DEVICE
int exponent_biased() const {
return int((raw() >> 23) & 0x0ff);
}
/// Returns the unbiased exponent
CUTLASS_HOST_DEVICE
int exponent() const {
return exponent_biased() - 127;
}
/// Returns the mantissa
CUTLASS_HOST_DEVICE
int mantissa() const {
return int(raw() & 0x7fffff);
}
};
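// Example (illustrative): the 13 least-significant mantissa bits are treated as
// don't-care bits; they are rounded on construction and cleared again when the
// value is converted back to float.
//
//   cutlass::tfloat32_t x(1.0f + 1.1920929e-7f);   // 1 + 2^-23
//   float y = float(x);                            // y == 1.0f
//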
///////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
bool signbit(cutlass::tfloat32_t const& h) {
return h.signbit();
}
CUTLASS_HOST_DEVICE
cutlass::tfloat32_t abs(cutlass::tfloat32_t const& h) {
return cutlass::tfloat32_t::bitcast(h.raw() & 0x7fffffff);
}
CUTLASS_HOST_DEVICE
bool isnan(cutlass::tfloat32_t const& h) {
return (h.exponent_biased() == 0x0ff) && h.mantissa();
}
CUTLASS_HOST_DEVICE
bool isfinite(cutlass::tfloat32_t const& h) {
return (h.exponent_biased() != 0x0ff);
}
CUTLASS_HOST_DEVICE
cutlass::tfloat32_t nan_tf32(const char*) {
// NVIDIA canonical NaN
return cutlass::tfloat32_t::bitcast(0x7fffffff);
}
CUTLASS_HOST_DEVICE
bool isinf(cutlass::tfloat32_t const& h) {
return (h.exponent_biased() == 0x0ff) && !h.mantissa();
}
CUTLASS_HOST_DEVICE
bool isnormal(cutlass::tfloat32_t const& h) {
return h.exponent_biased() && h.exponent_biased() != 0x0ff;
}
CUTLASS_HOST_DEVICE
int fpclassify(cutlass::tfloat32_t const& h) {
int exp = h.exponent_biased();
int mantissa = h.mantissa();
if (exp == 0x0ff) {
if (mantissa) {
return FP_NAN;
}
else {
return FP_INFINITE;
}
}
else if (!exp) {
if (mantissa) {
return FP_SUBNORMAL;
}
else {
return FP_ZERO;
}
}
return FP_NORMAL;
}
CUTLASS_HOST_DEVICE
cutlass::tfloat32_t sqrt(cutlass::tfloat32_t const& h) {
#if defined(__CUDACC_RTC__)
return cutlass::tfloat32_t(sqrtf(float(h)));
#else
return cutlass::tfloat32_t(std::sqrt(float(h)));
#endif
}
CUTLASS_HOST_DEVICE
tfloat32_t copysign(tfloat32_t const& a, tfloat32_t const& b) {
uint32_t a_mag = (reinterpret_cast<uint32_t const &>(a) & 0x7fffffff);
uint32_t b_sign = (reinterpret_cast<uint32_t const &>(b) & 0x80000000);
uint32_t result = (a_mag | b_sign);
return reinterpret_cast<tfloat32_t const &>(result);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Standard Library operations and definitions
//
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace std {
#if !defined(__CUDACC_RTC__)
/// Numeric limits
template <>
struct numeric_limits<cutlass::tfloat32_t> {
static bool const is_specialized = true;
static bool const is_signed = true;
static bool const is_integer = false;
static bool const is_exact = false;
static bool const has_infinity = true;
static bool const has_quiet_NaN = true;
static bool const has_signaling_NaN = false;
static std::float_denorm_style const has_denorm = std::denorm_present;
static bool const has_denorm_loss = true;
static std::float_round_style const round_style = std::round_to_nearest;
static bool const is_iec559 = false;
static bool const is_bounded = true;
static bool const is_modulo = false;
static int const digits = 19;
/// Least positive value
static cutlass::tfloat32_t min() { return cutlass::tfloat32_t::bitcast(0x01); }
/// Minimum finite value
static cutlass::tfloat32_t lowest() { return cutlass::tfloat32_t::bitcast(0xff7fffff); }
/// Maximum finite value
static cutlass::tfloat32_t max() { return cutlass::tfloat32_t::bitcast(0x7f7fffff); }
  /// Returns the machine epsilon
  static cutlass::tfloat32_t epsilon() { return cutlass::tfloat32_t::bitcast(0x1000); }
  /// Returns the maximum rounding error
  static cutlass::tfloat32_t round_error() { return cutlass::tfloat32_t(0.5f); }
  /// Returns positive infinity
  static cutlass::tfloat32_t infinity() { return cutlass::tfloat32_t::bitcast(0x7f800000); }
  /// Returns a quiet NaN
  static cutlass::tfloat32_t quiet_NaN() { return cutlass::tfloat32_t::bitcast(0x7fffffff); }
  /// Returns a signaling NaN
  static cutlass::tfloat32_t signaling_NaN() { return cutlass::tfloat32_t::bitcast(0x7fffffff); }
  /// Returns the smallest positive subnormal value
  static cutlass::tfloat32_t denorm_min() { return cutlass::tfloat32_t::bitcast(0x1); }
};
#endif
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace std
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// Arithmetic operators
//
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
CUTLASS_HOST_DEVICE
bool operator==(tfloat32_t const& lhs, tfloat32_t const& rhs) {
return float(lhs) == float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator!=(tfloat32_t const& lhs, tfloat32_t const& rhs) {
return float(lhs) != float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator<(tfloat32_t const& lhs, tfloat32_t const& rhs) {
return float(lhs) < float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator<=(tfloat32_t const& lhs, tfloat32_t const& rhs) {
return float(lhs) <= float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator>(tfloat32_t const& lhs, tfloat32_t const& rhs) {
return float(lhs) > float(rhs);
}
CUTLASS_HOST_DEVICE
bool operator>=(tfloat32_t const& lhs, tfloat32_t const& rhs) {
return float(lhs) >= float(rhs);
}
CUTLASS_HOST_DEVICE
tfloat32_t operator+(tfloat32_t const& lhs, tfloat32_t const& rhs) {
return tfloat32_t(float(lhs) + float(rhs));
}
CUTLASS_HOST_DEVICE
tfloat32_t operator-(tfloat32_t const& lhs) {
union u_tff32 {
float val_f32;
tfloat32_t val_tf;
CUTLASS_HOST_DEVICE u_tff32() : val_f32(0) { }
};
union u_tff32 x; x.val_f32 = -reinterpret_cast<float const &>(lhs);
return x.val_tf;
}
CUTLASS_HOST_DEVICE
tfloat32_t operator-(tfloat32_t const& lhs, tfloat32_t const& rhs) {
return tfloat32_t(float(lhs) - float(rhs));
}
CUTLASS_HOST_DEVICE
tfloat32_t operator*(tfloat32_t const& lhs, tfloat32_t const& rhs) {
return tfloat32_t(float(lhs) * float(rhs));
}
CUTLASS_HOST_DEVICE
tfloat32_t operator/(tfloat32_t const& lhs, tfloat32_t const& rhs) {
return tfloat32_t(float(lhs) / float(rhs));
}
CUTLASS_HOST_DEVICE
tfloat32_t& operator+=(tfloat32_t & lhs, tfloat32_t const& rhs) {
lhs = tfloat32_t(float(lhs) + float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
tfloat32_t& operator-=(tfloat32_t & lhs, tfloat32_t const& rhs) {
lhs = tfloat32_t(float(lhs) - float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
tfloat32_t& operator*=(tfloat32_t & lhs, tfloat32_t const& rhs) {
lhs = tfloat32_t(float(lhs) * float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
tfloat32_t& operator/=(tfloat32_t & lhs, tfloat32_t const& rhs) {
lhs = tfloat32_t(float(lhs) / float(rhs));
return lhs;
}
CUTLASS_HOST_DEVICE
tfloat32_t& operator++(tfloat32_t & lhs) {
float tmp(lhs);
++tmp;
lhs = tfloat32_t(tmp);
return lhs;
}
CUTLASS_HOST_DEVICE
tfloat32_t& operator--(tfloat32_t & lhs) {
float tmp(lhs);
--tmp;
lhs = tfloat32_t(tmp);
return lhs;
}
CUTLASS_HOST_DEVICE
tfloat32_t operator++(tfloat32_t & lhs, int) {
tfloat32_t ret(lhs);
float tmp(lhs);
tmp++;
lhs = tfloat32_t(tmp);
return ret;
}
CUTLASS_HOST_DEVICE
tfloat32_t operator--(tfloat32_t & lhs, int) {
tfloat32_t ret(lhs);
float tmp(lhs);
tmp--;
lhs = tfloat32_t(tmp);
return ret;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// User-defined literals
//
CUTLASS_HOST_DEVICE
cutlass::tfloat32_t operator "" _tf32(long double x) {
return cutlass::tfloat32_t(float(x));
}
CUTLASS_HOST_DEVICE
cutlass::tfloat32_t operator "" _tf32(unsigned long long int x) {
return cutlass::tfloat32_t(int(x));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| 13,017 | C | 26.23431 | 100 | 0.611585 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/matrix_coord.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines a canonical coordinate for rank=2 matrices offering named indices.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// MatrixCoord wraps Coord<2, int> to provide a helper for accessing named dimensions. Classes
/// expecting a coordinate in the rank=2 index space of a matrix should use MatrixCoord.
struct MatrixCoord : public Coord<2, int> {
public:
/// Integer-valued index
using Index = int;
/// Base type is a Coord of rank=2
using Base = Coord<2, Index>;
/// LongIndex type
using LongIndex = typename Base::LongIndex;
private:
/// Rows dimension
static int const kRow = 0;
/// Columns dimension
static int const kColumn = 1;
public:
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
MatrixCoord() { }
/// Constructs from Coord<2>
CUTLASS_HOST_DEVICE
MatrixCoord(Coord<2, Index> const &coord): Base(coord) { }
/// Helper to construct from a row and column
CUTLASS_HOST_DEVICE
MatrixCoord(Index row, Index column): Base(make_Coord(row, column)) { }
/// Helper to construct from a row and column, which are LongIndex based
CUTLASS_HOST_DEVICE
MatrixCoord(LongIndex row, LongIndex column): Base(make_Coord(Index(row), Index(column))) { }
/// Returns the row of the coordinate
CUTLASS_HOST_DEVICE
Index const & row() const { return this->at(kRow); }
/// Returns the row of the coordinate
CUTLASS_HOST_DEVICE
Index & row() { return this->at(kRow); }
/// Returns the column of the coordinate
CUTLASS_HOST_DEVICE
Index const & column() const { return this->at(kColumn); }
/// Returns the column of the coordinate
CUTLASS_HOST_DEVICE
Index & column() { return this->at(kColumn); }
//
// Coord operators
//
/// Element-wise addition
CUTLASS_HOST_DEVICE
MatrixCoord operator+(Base const& b) const {
return MatrixCoord(Base::operator+(b));
}
/// Element-wise subtraction
CUTLASS_HOST_DEVICE
MatrixCoord operator-(Base const& b) const {
return MatrixCoord(Base::operator-(b));
}
/// Element-wise multiplication
CUTLASS_HOST_DEVICE
MatrixCoord operator*(Base const& b) const {
return MatrixCoord(Base::operator*(b));
}
/// Element-wise division
CUTLASS_HOST_DEVICE
MatrixCoord operator/(Base const& b) const {
return MatrixCoord(Base::operator/(b));
}
/// In-place addition
CUTLASS_HOST_DEVICE
MatrixCoord& operator+=(Base const& b) {
Base::operator+=(b);
return *this;
}
/// In-place subtraction
CUTLASS_HOST_DEVICE
MatrixCoord& operator-=(Base const& b) {
Base::operator-=(b);
return *this;
}
/// In-place multiplication
CUTLASS_HOST_DEVICE
MatrixCoord& operator*=(Base const& b) {
Base::operator*=(b);
return *this;
}
/// In-place division
CUTLASS_HOST_DEVICE
MatrixCoord& operator/=(Base const& b) {
Base::operator/=(b);
return *this;
}
};
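// Example (illustrative): named access to the two indices of a rank-2 coordinate.
//
//   cutlass::MatrixCoord extent(128, 64);          // 128 rows, 64 columns
//   cutlass::MatrixCoord next = extent + cutlass::MatrixCoord(1, 2);
//   int rows = next.row();                         // 129
//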
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| 4,991 | C | 29.254545 | 100 | 0.652775 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/array_subbyte.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Statically sized array of elements that accommodates all CUTLASS-supported numeric types
and is safe to use in a union.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Statically sized array for any data type
template <
typename T,
int N
>
class Array<T, N, false> {
public:
static int const kSizeBits = sizeof_bits<T>::value * N;
/// Storage type
using Storage = typename platform::conditional<
((kSizeBits % 32) != 0),
typename platform::conditional<
((kSizeBits % 16) != 0),
uint8_t,
uint16_t
>::type,
uint32_t
>::type;
/// Element type
using Element = T;
/// Number of logical elements per stored object
static int const kElementsPerStoredItem = int(sizeof(Storage) * 8) / sizeof_bits<T>::value;
/// Number of storage elements
static size_t const kStorageElements = N / kElementsPerStoredItem;
/// Number of logical elements
static size_t const kElements = N;
/// Bitmask for covering one item
static Storage const kMask = ((Storage(1) << sizeof_bits<T>::value) - 1);
//
// C++ standard members with pointer types removed
//
typedef T value_type;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef value_type *pointer;
typedef value_type const *const_pointer;
//
// References
//
/// Reference object inserts or extracts sub-byte items
class reference {
/// Pointer to storage element
Storage *ptr_;
/// Index into elements packed into Storage object
int idx_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
reference(): ptr_(nullptr), idx_(0) { }
/// Ctor
CUTLASS_HOST_DEVICE
reference(Storage *ptr, int idx = 0): ptr_(ptr), idx_(idx) { }
/// Assignment
CUTLASS_HOST_DEVICE
reference &operator=(T x) {
Storage item = (reinterpret_cast<Storage const &>(x) & kMask);
Storage kUpdateMask = Storage(~(kMask << (idx_ * sizeof_bits<T>::value)));
*ptr_ = Storage(((*ptr_ & kUpdateMask) | (item << idx_ * sizeof_bits<T>::value)));
return *this;
}
CUTLASS_HOST_DEVICE
T get() const {
Storage item = Storage((*ptr_ >> (idx_ * sizeof_bits<T>::value)) & kMask);
return reinterpret_cast<T const &>(item);
}
/// Extract
CUTLASS_HOST_DEVICE
operator T() const {
return get();
}
/// Explicit cast to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
return int(get());
}
/// Explicit cast to float
CUTLASS_HOST_DEVICE
explicit operator float() const {
return float(get());
}
};
/// Reference object extracts sub-byte items
class const_reference {
/// Pointer to storage element
Storage const *ptr_;
/// Index into elements packed into Storage object
int idx_;
public:
/// Default ctor
CUTLASS_HOST_DEVICE
const_reference(): ptr_(nullptr), idx_(0) { }
/// Ctor
CUTLASS_HOST_DEVICE
const_reference(Storage const *ptr, int idx = 0): ptr_(ptr), idx_(idx) { }
CUTLASS_HOST_DEVICE
const T get() const {
Storage item = (*ptr_ >> (idx_ * sizeof_bits<T>::value)) & kMask;
return reinterpret_cast<T const &>(item);
}
/// Extract
CUTLASS_HOST_DEVICE
operator T() const {
Storage item = Storage(Storage(*ptr_ >> Storage(idx_ * sizeof_bits<T>::value)) & kMask);
return reinterpret_cast<T const &>(item);
}
/// Explicit cast to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
return int(get());
}
/// Explicit cast to float
CUTLASS_HOST_DEVICE
explicit operator float() const {
return float(get());
}
};
//
// Iterators
//
/// Bidirectional iterator over elements
class iterator {
/// Pointer to storage element
Storage *ptr_;
/// Index into elements packed into Storage object
int idx_;
public:
CUTLASS_HOST_DEVICE
iterator(): ptr_(nullptr), idx_(0) { }
CUTLASS_HOST_DEVICE
iterator(Storage *ptr, int idx = 0): ptr_(ptr), idx_(idx) { }
CUTLASS_HOST_DEVICE
iterator &operator++() {
++idx_;
if (idx_ == kElementsPerStoredItem) {
++ptr_;
idx_ = 0;
}
return *this;
}
CUTLASS_HOST_DEVICE
iterator &operator--() {
if (!idx_) {
--ptr_;
idx_ = kElementsPerStoredItem - 1;
}
else {
--idx_;
}
return *this;
}
CUTLASS_HOST_DEVICE
iterator operator++(int) {
iterator ret(*this);
++idx_;
if (idx_ == kElementsPerStoredItem) {
++ptr_;
idx_ = 0;
}
return ret;
}
CUTLASS_HOST_DEVICE
iterator operator--(int) {
iterator ret(*this);
if (!idx_) {
--ptr_;
idx_ = kElementsPerStoredItem - 1;
}
else {
--idx_;
}
return ret;
}
CUTLASS_HOST_DEVICE
reference operator*() const {
return reference(ptr_, idx_);
}
CUTLASS_HOST_DEVICE
bool operator==(iterator const &other) const {
return ptr_ == other.ptr_ && idx_ == other.idx_;
}
CUTLASS_HOST_DEVICE
bool operator!=(iterator const &other) const {
return !(*this == other);
}
};
/// Bidirectional constant iterator over elements
class const_iterator {
/// Pointer to storage element
Storage const *ptr_;
/// Index into elements packed into Storage object
int idx_;
public:
CUTLASS_HOST_DEVICE
const_iterator(): ptr_(nullptr), idx_(0) { }
CUTLASS_HOST_DEVICE
const_iterator(Storage const *ptr, int idx = 0): ptr_(ptr), idx_(idx) { }
CUTLASS_HOST_DEVICE
    const_iterator &operator++() {
++idx_;
if (idx_ == kElementsPerStoredItem) {
++ptr_;
idx_ = 0;
}
return *this;
}
CUTLASS_HOST_DEVICE
    const_iterator &operator--() {
if (!idx_) {
--ptr_;
idx_ = kElementsPerStoredItem - 1;
}
else {
--idx_;
}
return *this;
}
CUTLASS_HOST_DEVICE
    const_iterator operator++(int) {
      const_iterator ret(*this);
++idx_;
if (idx_ == kElementsPerStoredItem) {
++ptr_;
idx_ = 0;
}
return ret;
}
CUTLASS_HOST_DEVICE
    const_iterator operator--(int) {
      const_iterator ret(*this);
if (!idx_) {
--ptr_;
idx_ = kElementsPerStoredItem - 1;
}
else {
--idx_;
}
return ret;
}
CUTLASS_HOST_DEVICE
const_reference operator*() const {
return const_reference(ptr_, idx_);
}
CUTLASS_HOST_DEVICE
    bool operator==(const_iterator const &other) const {
return ptr_ == other.ptr_ && idx_ == other.idx_;
}
CUTLASS_HOST_DEVICE
    bool operator!=(const_iterator const &other) const {
return !(*this == other);
}
};
/// Bidirectional iterator over elements
class reverse_iterator {
/// Pointer to storage element
Storage *ptr_;
/// Index into elements packed into Storage object
int idx_;
public:
CUTLASS_HOST_DEVICE
reverse_iterator(): ptr_(nullptr), idx_(0) { }
CUTLASS_HOST_DEVICE
reverse_iterator(Storage *ptr, int idx = 0): ptr_(ptr), idx_(idx) { }
// TODO
};
/// Bidirectional constant iterator over elements
class const_reverse_iterator {
/// Pointer to storage element
Storage const *ptr_;
/// Index into elements packed into Storage object
int idx_;
public:
CUTLASS_HOST_DEVICE
const_reverse_iterator(): ptr_(nullptr), idx_(0) { }
CUTLASS_HOST_DEVICE
const_reverse_iterator(Storage const *ptr, int idx = 0): ptr_(ptr), idx_(idx) { }
// TODO
};
private:
/// Internal storage
Storage storage[kStorageElements];
public:
#if 0
CUTLASS_HOST_DEVICE
Array() { }
CUTLASS_HOST_DEVICE
Array(Array const &x) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < int(kStorageElements); ++i) {
storage[i] = x.storage[i];
}
}
#endif
/// Efficient clear method
CUTLASS_HOST_DEVICE
void clear() {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < int(kStorageElements); ++i) {
storage[i] = Storage(0);
}
}
CUTLASS_HOST_DEVICE
reference at(size_type pos) {
return reference(storage + pos / kElementsPerStoredItem, pos % kElementsPerStoredItem);
}
CUTLASS_HOST_DEVICE
const_reference at(size_type pos) const {
return const_reference(storage + pos / kElementsPerStoredItem, pos % kElementsPerStoredItem);
}
CUTLASS_HOST_DEVICE
reference operator[](size_type pos) {
return at(pos);
}
CUTLASS_HOST_DEVICE
const_reference operator[](size_type pos) const {
return at(pos);
}
CUTLASS_HOST_DEVICE
reference front() {
return at(0);
}
CUTLASS_HOST_DEVICE
const_reference front() const {
return at(0);
}
CUTLASS_HOST_DEVICE
reference back() {
return reference(storage + kStorageElements - 1, kElementsPerStoredItem - 1);
}
CUTLASS_HOST_DEVICE
const_reference back() const {
return const_reference(storage + kStorageElements - 1, kElementsPerStoredItem - 1);
}
CUTLASS_HOST_DEVICE
pointer data() {
return reinterpret_cast<pointer>(storage);
}
CUTLASS_HOST_DEVICE
const_pointer data() const {
return reinterpret_cast<const_pointer>(storage);
}
CUTLASS_HOST_DEVICE
Storage * raw_data() {
return storage;
}
CUTLASS_HOST_DEVICE
Storage const * raw_data() const {
return storage;
}
CUTLASS_HOST_DEVICE
constexpr bool empty() const {
return !kElements;
}
CUTLASS_HOST_DEVICE
constexpr size_type size() const {
return kElements;
}
CUTLASS_HOST_DEVICE
constexpr size_type max_size() const {
return kElements;
}
CUTLASS_HOST_DEVICE
void fill(T const &value) {
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < kElementsPerStoredItem; ++i) {
reference ref(storage, i);
ref = value;
}
CUTLASS_PRAGMA_UNROLL
for (int i = 1; i < kStorageElements; ++i) {
storage[i] = storage[0];
}
}
CUTLASS_HOST_DEVICE
iterator begin() {
return iterator(storage);
}
CUTLASS_HOST_DEVICE
const_iterator cbegin() const {
return const_iterator(storage);
}
CUTLASS_HOST_DEVICE
iterator end() {
return iterator(storage + kStorageElements);
}
CUTLASS_HOST_DEVICE
const_iterator cend() const {
return const_iterator(storage + kStorageElements);
}
CUTLASS_HOST_DEVICE
reverse_iterator rbegin() {
return reverse_iterator(storage + kStorageElements);
}
CUTLASS_HOST_DEVICE
const_reverse_iterator crbegin() const {
return const_reverse_iterator(storage + kStorageElements);
}
CUTLASS_HOST_DEVICE
reverse_iterator rend() {
return reverse_iterator(storage);
}
CUTLASS_HOST_DEVICE
const_reverse_iterator crend() const {
return const_reverse_iterator(storage);
}
//
// Comparison operators
//
};
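// Example (an illustrative sketch; it assumes a sub-byte element type such as
// cutlass::int4b_t, defined in cutlass/integer_subbyte.h, is available):
//
//   cutlass::Array<cutlass::int4b_t, 8> packed;    // eight 4-bit values in one 32-bit word
//   packed.clear();
//   packed[3] = cutlass::int4b_t(-2);              // the reference proxy packs the nibble in place
//   int v = int(packed[3]);                        // the proxy extracts the element again
//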
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| 13,154 | C | 22.119508 | 100 | 0.602934 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/fast_math.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cstdint>
#else
#include <cstdint>
#include <cmath>
#include <type_traits>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/uint128.h"
#include "cutlass/coord.h"
#include "cutlass/numeric_types.h"
/**
* \file
* \brief Math utilities
*/
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
CUTLASS_HOST_DEVICE void swap(T &lhs, T &rhs) {
T tmp = lhs;
lhs = rhs;
rhs = tmp;
}
/******************************************************************************
* Static math utilities
******************************************************************************/
/// Mixed precision dot product
template <typename Index, typename LongIndex, int N>
CUTLASS_HOST_DEVICE LongIndex dot(
Coord<N, Index> const &coord,
Coord<N, LongIndex> const &stride,
LongIndex acc = LongIndex()) {
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < N; ++n) {
acc += LongIndex(coord[n]) * stride[n];
}
return acc;
}
/**
* Statically determine if N is a power-of-two
*/
template <int N>
struct is_pow2 {
static bool const value = ((N & (N - 1)) == 0);
};
/**
* Statically determine log2(N), rounded down
*/
template <int N, int CurrentVal = N, int Count = 0>
struct log2_down {
/// Static logarithm value
enum { value = log2_down<N, (CurrentVal >> 1), Count + 1>::value };
};
// Base case
template <int N, int Count>
struct log2_down<N, 1, Count> {
enum { value = Count };
};
/**
* Statically determine log2(N), rounded up
*/
template <int N, int CurrentVal = N, int Count = 0>
struct log2_up {
/// Static logarithm value
enum { value = log2_up<N, (CurrentVal >> 1), Count + 1>::value };
};
// Base case
template <int N, int Count>
struct log2_up<N, 1, Count> {
enum { value = ((1 << Count) < N) ? Count + 1 : Count };
};
/**
* Statically estimate sqrt(N) to the nearest power-of-two
*/
template <int N>
struct sqrt_est {
enum { value = 1 << (log2_up<N>::value / 2) };
};
/**
* For performing a constant-division with a compile-time assertion that the
* Divisor evenly-divides the Dividend.
*/
template <int Dividend, int Divisor>
struct divide_assert {
enum { value = Dividend / Divisor };
static_assert((Dividend % Divisor == 0), "Not an even multiple");
};
/******************************************************************************
* Rounding
******************************************************************************/
/**
* Round dividend up to the nearest multiple of divisor
*/
template <typename dividend_t, typename divisor_t>
CUTLASS_HOST_DEVICE dividend_t round_nearest(dividend_t dividend, divisor_t divisor) {
return ((dividend + divisor - 1) / divisor) * divisor;
}
/**
* Greatest common divisor
*/
template <typename value_t>
CUTLASS_HOST_DEVICE value_t gcd(value_t a, value_t b) {
for (;;) {
if (a == 0) return b;
b %= a;
if (b == 0) return a;
a %= b;
}
}
/**
* Least common multiple
*/
template <typename value_t>
CUTLASS_HOST_DEVICE value_t lcm(value_t a, value_t b) {
value_t temp = gcd(a, b);
return temp ? (a / temp * b) : 0;
}
/// Returns the smallest value in the half-open range [a, a+b) that is a multiple of b
CUTLASS_HOST_DEVICE
constexpr int round_up(int a, int b) {
return ((a + b - 1) / b) * b;
}
/// Returns the ceiling of (a / b)
CUTLASS_HOST_DEVICE
constexpr int ceil_div(int a, int b) {
return (a + b - 1) / b;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Run-time log2 helpers. Unlike the compile-time log2_up/log2_down templates
 * above, these operate on run-time values; find_log2 returns ceil(log2(x)).
 */
template <typename value_t>
CUTLASS_HOST_DEVICE value_t clz(value_t x) {
for (int i = 31; i >= 0; --i) {
if ((1 << i) & x) return 31 - i;
}
return 32;
}
template <typename value_t>
CUTLASS_HOST_DEVICE value_t find_log2(value_t x) {
int a = int(31 - clz(x));
a += (x & (x - 1)) != 0; // Round up, add 1 if not a power of 2.
return a;
}
/**
* Find divisor, using find_log2
*/
CUTLASS_HOST_DEVICE
void find_divisor(unsigned int& mul, unsigned int& shr, unsigned int denom) {
if (denom == 1) {
mul = 0;
shr = 0;
} else {
unsigned int p = 31 + find_log2(denom);
unsigned m = unsigned(((1ull << p) + unsigned(denom) - 1) / unsigned(denom));
mul = m;
shr = p - 32;
}
}
/**
* Find quotient and remainder using device-side intrinsics
*/
CUTLASS_HOST_DEVICE
void fast_divmod(int& quo, int& rem, int src, int div, unsigned int mul, unsigned int shr) {
#if defined(__CUDA_ARCH__)
// Use IMUL.HI if div != 1, else simply copy the source.
quo = (div != 1) ? __umulhi(src, mul) >> shr : src;
#else
quo = int((div != 1) ? int(((int64_t)src * mul) >> 32) >> shr : src);
#endif
// The remainder.
rem = src - (quo * div);
}
// For long int input
CUTLASS_HOST_DEVICE
void fast_divmod(int& quo, int64_t& rem, int64_t src, int div, unsigned int mul, unsigned int shr) {
#if defined(__CUDA_ARCH__)
// Use IMUL.HI if div != 1, else simply copy the source.
quo = (div != 1) ? __umulhi(src, mul) >> shr : src;
#else
quo = int((div != 1) ? ((src * mul) >> 32) >> shr : src);
#endif
// The remainder.
rem = src - (quo * div);
}
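// Worked example (illustrative): for denom = 3, find_divisor() computes
// p = 31 + find_log2(3) = 33, mul = ((1ull << 33) + 2) / 3 = 0xAAAAAAAB and shr = 1.
// fast_divmod() then yields quo = __umulhi(10, 0xAAAAAAAB) >> 1 = 3 and
// rem = 10 - 3 * 3 = 1 for src = 10.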
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Object to encapsulate the fast division+modulus operation.
///
/// This object precomputes two values used to accelerate the computation and is best used
/// when the divisor is a grid-invariant. In this case, it may be computed in host code and
/// marshalled along other kernel arguments using the 'Params' pattern.
///
/// Example:
///
///
/// int quotient, remainder, dividend, divisor;
///
/// FastDivmod divmod(divisor);
///
/// divmod(quotient, remainder, dividend);
///
/// // quotient = (dividend / divisor)
/// // remainder = (dividend % divisor)
///
struct FastDivmod {
int divisor;
unsigned int multiplier;
unsigned int shift_right;
/// Find quotient and remainder using device-side intrinsics
CUTLASS_HOST_DEVICE
void fast_divmod(int& quotient, int& remainder, int dividend) const {
#if defined(__CUDA_ARCH__)
// Use IMUL.HI if divisor != 1, else simply copy the source.
quotient = (divisor != 1) ? __umulhi(dividend, multiplier) >> shift_right : dividend;
#else
quotient = int((divisor != 1) ? int(((int64_t)dividend * multiplier) >> 32) >> shift_right : dividend);
#endif
// The remainder.
remainder = dividend - (quotient * divisor);
}
/// For long int input
CUTLASS_HOST_DEVICE
void fast_divmod(int& quotient, int64_t& remainder, int64_t dividend) const {
#if defined(__CUDA_ARCH__)
// Use IMUL.HI if divisor != 1, else simply copy the source.
quotient = (divisor != 1) ? __umulhi(dividend, multiplier) >> shift_right : dividend;
#else
quotient = int((divisor != 1) ? ((dividend * multiplier) >> 32) >> shift_right : dividend);
#endif
// The remainder.
remainder = dividend - (quotient * divisor);
}
/// Construct the FastDivmod object, in host code ideally.
///
/// This precomputes some values based on the divisor and is computationally expensive.
CUTLASS_HOST_DEVICE
FastDivmod(): divisor(0), multiplier(0), shift_right(0) { }
CUTLASS_HOST_DEVICE
FastDivmod(int divisor): divisor(divisor) {
if (divisor != 1) {
unsigned int p = 31 + find_log2(divisor);
unsigned m = unsigned(((1ull << p) + unsigned(divisor) - 1) / unsigned(divisor));
multiplier = m;
shift_right = p - 32;
} else {
multiplier = 0;
shift_right = 0;
}
}
/// Computes integer division and modulus using precomputed values. This is computationally
/// inexpensive.
CUTLASS_HOST_DEVICE
  void operator()(int &quotient, int &remainder, int dividend) const {
fast_divmod(quotient, remainder, dividend);
}
/// Computes integer division using precomputed values. This is computationally
/// inexpensive.
CUTLASS_HOST_DEVICE
int div(int dividend) const {
int quotient, remainder;
fast_divmod(quotient, remainder, dividend);
return quotient;
}
/// Computes integer division and modulus using precomputed values. This is computationally
/// inexpensive.
///
/// Simply returns the quotient
CUTLASS_HOST_DEVICE
int divmod(int &remainder, int dividend) const {
int quotient;
fast_divmod(quotient, remainder, dividend);
return quotient;
}
/// Computes integer division and modulus using precomputed values. This is computationally
/// inexpensive.
CUTLASS_HOST_DEVICE
  void operator()(int &quotient, int64_t &remainder, int64_t dividend) const {
fast_divmod(quotient, remainder, dividend);
}
/// Computes integer division and modulus using precomputed values. This is computationally
/// inexpensive.
CUTLASS_HOST_DEVICE
int divmod(int64_t &remainder, int64_t dividend) const {
int quotient;
fast_divmod(quotient, remainder, dividend);
return quotient;
}
/// Returns the divisor when cast to integer
CUTLASS_HOST_DEVICE
operator int() const { return divisor; }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Object to encapsulate the fast division+modulus operation for 64b integer division.
///
/// This object precomputes two values used to accelerate the computation and is best used
/// when the divisor is a grid-invariant. In this case, it may be computed in host code and
/// marshalled along other kernel arguments using the 'Params' pattern.
///
/// Example:
///
///
/// uint64_t quotient, remainder, dividend, divisor;
///
/// FastDivmodU64 divmod(divisor);
///
/// divmod(quotient, remainder, dividend);
///
/// // quotient = (dividend / divisor)
/// // remainder = (dividend % divisor)
///
struct FastDivmodU64 {
uint64_t divisor;
uint64_t multiplier;
unsigned int shift_right;
unsigned int round_up;
//
// Static methods
//
/// Computes b, where 2^b is the greatest power of two that is less than or equal to x
CUTLASS_HOST_DEVICE
static uint32_t integer_log2(uint64_t x) {
uint32_t n = 0;
while (x >>= 1) {
++n;
}
return n;
}
/// Default ctor
CUTLASS_HOST_DEVICE
FastDivmodU64(): divisor(0), multiplier(0), shift_right(0), round_up(0) { }
/// Construct the FastDivmod object, in host code ideally.
///
/// This precomputes some values based on the divisor and is computationally expensive.
CUTLASS_HOST_DEVICE
FastDivmodU64(uint64_t divisor_): divisor(divisor_), multiplier(1), shift_right(0), round_up(0) {
if (divisor) {
shift_right = integer_log2(divisor);
if ((divisor & (divisor - 1)) == 0) {
multiplier = 0;
}
else {
uint64_t power_of_two = (uint64_t(1) << shift_right);
uint64_t multiplier_lo = uint128_t(0, power_of_two) / divisor;
multiplier = uint128_t(power_of_two, power_of_two) / divisor;
round_up = (multiplier_lo == multiplier ? 1 : 0);
}
}
}
/// Returns the quotient of floor(dividend / divisor)
CUTLASS_HOST_DEVICE
uint64_t divide(uint64_t dividend) const {
uint64_t quotient = 0;
#ifdef __CUDA_ARCH__
uint64_t x = dividend;
if (multiplier) {
x = __umul64hi(dividend + round_up, multiplier);
}
quotient = (x >> shift_right);
#else
// TODO - use proper 'fast' division here also. No reason why x86-code shouldn't be optimized.
quotient = dividend / divisor;
#endif
return quotient;
}
/// Computes the remainder given a computed quotient and dividend
CUTLASS_HOST_DEVICE
uint64_t modulus(uint64_t quotient, uint64_t dividend) const {
return uint32_t(dividend - quotient * divisor);
}
/// Returns the quotient of floor(dividend / divisor) and computes the remainder
CUTLASS_HOST_DEVICE
uint64_t divmod(uint64_t &remainder, uint64_t dividend) const {
uint64_t quotient = divide(dividend);
remainder = modulus(quotient, dividend);
return quotient;
}
/// Computes integer division and modulus using precomputed values. This is computationally
/// inexpensive.
CUTLASS_HOST_DEVICE
  void operator()(uint64_t &quotient, uint64_t &remainder, uint64_t dividend) const {
quotient = divmod(remainder, dividend);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Computes the coordinate decomposition from a linear index (64-bit linear index => coord<int32_t>)
///
/// This decomposition is accelerated by the FastDivmodU64 object. It is assumed that
/// a coordinate of <Rank> indices can be decomposed by <Rank - 1> div/mod operations.
/// Note, it is assumed that element divmod[0] divides by extent[1].
///
/// For example, assume 4-D coordinate (n, p, q, c) is mapped to a linear index `npqc`. This
/// can be decomposed via three divide and modulus operations:
///
/// c = npqc % C; | divmod[2] = FastDivmodU64(C)
/// npq = npqc / C; | coord[3] = c
///
/// q = npq % Q; | divmod[1] = FastDivmodU64(Q)
/// np = npq / Q; | coord[2] = q
///
/// p = np % P; | divmod[0] = FastDivmodU64(P)
/// n = np / P; | coord[1] = p
///
/// | coord[0] = n
///
template <int Rank>
CUTLASS_HOST_DEVICE Coord<Rank> CoordinateDecomposition(
uint64_t linear_idx, ///< Linear index to decompose
FastDivmodU64 const *divmod) { ///< Pointer to array of Rank-1 FastDivmodU64 objects
static_assert(Rank > 0, "CoordinateDecomposition requires Rank=1 or greater.");
Coord<Rank> coord;
CUTLASS_PRAGMA_UNROLL
for (int i = Rank; i > 1; --i) {
uint64_t remainder;
linear_idx = divmod[i - 2].divmod(remainder, linear_idx);
coord[i - 1] = int(remainder);
}
coord[0] = int(linear_idx);
return coord;
}
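// Illustrative sketch of the setup described above (host code), for extents (N, P, Q, C):
//
//   FastDivmodU64 divmod[] = { FastDivmodU64(P), FastDivmodU64(Q), FastDivmodU64(C) };
//   Coord<4> coord = CoordinateDecomposition<4>(linear_idx, divmod);   // coord = (n, p, q, c)
//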
/// Computes the coordinate decomposition from a linear index (32-bit linear index => coord<int32_t>)
template <int Rank>
CUTLASS_HOST_DEVICE Coord<Rank> CoordinateDecomposition(
int linear_idx, ///< Linear index to decompose
  FastDivmod const *divmod) {             ///< Pointer to array of Rank-1 FastDivmod objects
static_assert(Rank > 0, "CoordinateDecomposition requires Rank=1 or greater.");
Coord<Rank> coord;
CUTLASS_PRAGMA_UNROLL
for (int i = Rank; i > 1; --i) {
int remainder;
linear_idx = divmod[i - 2].divmod(remainder, linear_idx);
coord[i - 1] = int(remainder);
}
coord[0] = int(linear_idx);
return coord;
}
template <int Rank>
CUTLASS_HOST_DEVICE Coord<Rank> CoordinateDecompositionLittleEndian(
uint64_t linear_idx, ///< Linear index to decompose
FastDivmodU64 const *divmod) { ///< Pointer to array of Rank-1 FastDivmodU64 objects
static_assert(Rank > 0, "CoordinateDecomposition requires Rank=1 or greater.");
Coord<Rank> coord;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Rank - 1; ++i) {
uint64_t remainder;
linear_idx = divmod[i].divmod(remainder, linear_idx);
coord[i] = int(remainder);
}
coord[Rank - 1] = int(linear_idx);
return coord;
}
/// Computes the coordinate decomposition from a linear index (32-bit linear index => coord<int32_t>)
template <int Rank>
CUTLASS_HOST_DEVICE Coord<Rank> CoordinateDecompositionLittleEndian(
int linear_idx, ///< Linear index to decompose
  FastDivmod const *divmod) {             ///< Pointer to array of Rank-1 FastDivmod objects
static_assert(Rank > 0, "CoordinateDecomposition requires Rank=1 or greater.");
Coord<Rank> coord;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < Rank - 1; ++i) {
int remainder;
linear_idx = divmod[i].divmod(remainder, linear_idx);
coord[i] = int(remainder);
}
coord[Rank - 1] = int(linear_idx);
return coord;
}
/// Safely computes the offset of a linear index in bytes for all types
template <typename Element>
CUTLASS_HOST_DEVICE int64_t OffsetBytes(int64_t index) {
static_assert(
(sizeof_bits<Element>::value >= 8 && !(sizeof_bits<Element>::value % 8)) ||
(sizeof_bits<Element>::value < 8 && !(8 % sizeof_bits<Element>::value)),
"Size of numeric type in bits must either be divisible by 8 bits, or 8 bits must be divisible by the size.");
if (sizeof_bits<Element>::value >= 8) {
return index * (sizeof_bits<Element>::value / 8);
}
else {
int const kElementsPerByte = ((8 / sizeof_bits<Element>::value) + ((sizeof_bits<Element>::value >= 8) ? 1 : 0));
return index / kElementsPerByte;
}
}
CUTLASS_HOST_DEVICE int64_t OffsetBytes(int64_t index, int64_t element_sizeof_bits) {
if (element_sizeof_bits >= 8) {
return index * (element_sizeof_bits / 8);
}
else {
int64_t const kElementsPerByte = ((8 / element_sizeof_bits) + ((element_sizeof_bits >= 8) ? 1 : 0));
return index / kElementsPerByte;
}
}
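// Example (illustrative): for a 4-bit element such as cutlass::int4b_t, two elements
// share one byte, so OffsetBytes<int4b_t>(10) == 5; for float, OffsetBytes<float>(10) == 40.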
/////////////////////////////////////////////////////////////////////////////////////////////////
// Min/Max
/////////////////////////////////////////////////////////////////////////////////////////////////
template <int A, int B>
struct Min {
static int const kValue = (A < B) ? A : B;
};
template <int A, int B>
struct Max {
static int const kValue = (A > B) ? A : B;
};
CUTLASS_HOST_DEVICE
constexpr int const_min(int a, int b) {
return (b < a ? b : a);
}
CUTLASS_HOST_DEVICE
constexpr int const_max(int a, int b) {
return (b > a ? b : a);
}
template <typename T>
CUTLASS_HOST_DEVICE
T fast_min(T a, T b) {
return (b < a ? b : a);
}
template <>
CUTLASS_HOST_DEVICE
float fast_min(float a, float b) {
return fminf(a, b);
}
template <typename T>
CUTLASS_HOST_DEVICE
T fast_max(T a, T b) {
return (a < b ? b : a);
}
template <>
CUTLASS_HOST_DEVICE
float fast_max(float a, float b) {
return fmaxf(a, b);
}
CUTLASS_HOST_DEVICE
float fast_cos(float theta) {
#if defined(__CUDA_ARCH__)
return ::cosf(theta);
#else
return std::cos(theta);
#endif
}
CUTLASS_HOST_DEVICE
double fast_cos(double theta) {
#if defined(__CUDA_ARCH__)
return ::cos(theta);
#else
return std::cos(theta);
#endif
}
CUTLASS_HOST_DEVICE
float fast_sin(float theta) {
#if defined(__CUDA_ARCH__)
return ::sinf(theta);
#else
return std::sin(theta);
#endif
}
CUTLASS_HOST_DEVICE
double fast_sin(double theta) {
#if defined(__CUDA_ARCH__)
return ::sin(theta);
#else
return std::sin(theta);
#endif
}
CUTLASS_HOST_DEVICE
float fast_acos(float theta) {
#if defined(__CUDA_ARCH__)
return ::acosf(theta);
#else
return std::acos(theta);
#endif
}
CUTLASS_HOST_DEVICE
double fast_acos(double theta) {
#if defined(__CUDA_ARCH__)
return ::acos(theta);
#else
return std::acos(theta);
#endif
}
CUTLASS_HOST_DEVICE
float fast_asin(float theta) {
#if defined(__CUDA_ARCH__)
return ::asinf(theta);
#else
return std::asin(theta);
#endif
}
CUTLASS_HOST_DEVICE
double fast_asin(double theta) {
#if defined(__CUDA_ARCH__)
return ::asin(theta);
#else
return std::asin(theta);
#endif
}
CUTLASS_HOST_DEVICE
float fast_sqrt(float theta) {
#if defined(__CUDA_ARCH__)
return ::sqrtf(theta);
#else
return std::sqrt(theta);
#endif
}
CUTLASS_HOST_DEVICE
double fast_sqrt(double theta) {
#if defined(__CUDA_ARCH__)
return ::sqrt(theta);
#else
return std::sqrt(theta);
#endif
}
CUTLASS_HOST_DEVICE
float fast_exp(float x) {
#if defined(__CUDA_ARCH__)
return ::expf(x);
#else
return std::exp(x);
#endif
}
CUTLASS_HOST_DEVICE
double fast_exp(double x) {
#if defined(__CUDA_ARCH__)
return ::exp(x);
#else
return std::exp(x);
#endif
}
CUTLASS_HOST_DEVICE
half_t fast_exp(half_t x) {
#if defined(__CUDA_ARCH__) && (__CUDACC_VER_MAJOR__ >= 10) && (__CUDA_ARCH__ >= 750)
return (half_t)(::hexp(x.to_half()));
#else
return (half_t)(fast_exp(float(x)));
#endif
}
CUTLASS_HOST_DEVICE
float fast_log(float x) {
#if defined(__CUDA_ARCH__)
return ::logf(x);
#else
return std::log(x);
#endif
}
CUTLASS_HOST_DEVICE
double fast_log(double x) {
#if defined(__CUDA_ARCH__)
return ::log(x);
#else
return std::log(x);
#endif
}
CUTLASS_HOST_DEVICE
float fast_tanh(float x) {
#if defined(__CUDA_ARCH__)
#if (__CUDACC_VER_MAJOR__ >= 11) && (__CUDA_ARCH__ >= 750)
float y;
asm volatile ( "tanh.approx.f32 %0, %1; " : "=f"(y) : "f"(x));
return y;
#else
return ::tanhf(x);
#endif
#else
return std::tanh(x);
#endif
}
CUTLASS_HOST_DEVICE
double fast_tanh(double x) {
#if defined(__CUDA_ARCH__)
return ::tanh(x);
#else
return std::tanh(x);
#endif
}
CUTLASS_HOST_DEVICE
half_t fast_tanh(half_t x) {
#if defined(__CUDA_ARCH__) && (__CUDACC_VER_MAJOR__ >= 11) && (__CUDA_ARCH__ >= 750)
asm volatile ( "tanh.approx.f16 %0, %1;" : "=h"(x.raw()) : "h"(x.raw()));
return x;
#else
return half_t(fast_tanh(float(x)));
#endif
}
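// Illustrative sketch (not part of the original header): the fast_* wrappers above dispatch to
// device math routines under __CUDA_ARCH__ and to <cmath> on the host, so the same expression
// can be used in both contexts. "gelu_tanh_approx" is a hypothetical helper, not a CUTLASS API.
CUTLASS_HOST_DEVICE
float gelu_tanh_approx(float x) {
  // tanh-based GELU approximation built only from the fast_* helpers defined above
  float inner = 0.7978845608f * (x + 0.044715f * x * x * x);
  return 0.5f * x * (1.0f + fast_tanh(inner));
}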
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
struct fast_exp_op {
CUTLASS_HOST_DEVICE
T operator()(T const &rhs) const {
return fast_exp(rhs);
}
};
#if defined(__CUDA_ARCH__) && (__CUDACC_VER_MAJOR__ >= 10) && (__CUDA_ARCH__ >= 750)
template <int N>
struct fast_exp_op<Array<half_t, N>> {
CUTLASS_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
// use x2 specialization
__half2 const *in = reinterpret_cast<__half2 const *>(&rhs);
__half2 *out = reinterpret_cast<__half2 *>(&result);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
out[i] = ::h2exp(in[i]);
}
// residual
if (N % 2) {
half_t last = rhs[N - 1];
result[N - 1] = half_t(::hexp(last.to_half()));
}
return result;
}
};
#endif // #if defined(__CUDA_ARCH__)
template <typename T, int N>
struct fast_exp_op<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &rhs) const {
fast_exp_op<T> fast_op;
Array<T, N> y;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = fast_op(rhs[i]);
}
return y;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
struct fast_tanh_op {
CUTLASS_HOST_DEVICE
T operator()(T const &rhs) const {
return fast_tanh(rhs);
}
};
#if defined(__CUDA_ARCH__) && (__CUDACC_VER_MAJOR__ >= 11) && (__CUDA_ARCH__ >= 750)
template <int N>
struct fast_tanh_op<Array<half_t, N>> {
CUTLASS_DEVICE
Array<half_t, N> operator()(Array<half_t, N> const &rhs) const {
Array<half_t, N> result;
// use x2 specialization
uint32_t const *in = reinterpret_cast<uint32_t const *>(&rhs);
uint32_t *out = reinterpret_cast<uint32_t *>(&result);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
asm volatile ("tanh.approx.f16x2 %0, %1;" : "=r"(out[i]) : "r"(in[i]));
}
// residual
if (N % 2) {
uint16_t const *in = reinterpret_cast<uint16_t const *>(&rhs);
uint16_t *out = reinterpret_cast<uint16_t *>(&result);
asm volatile ("tanh.approx.f16 %0, %1;" : "=h"(out[N - 1]) : "h"(in[N - 1]));
}
return result;
}
};
#endif // #if defined(__CUDA_ARCH__)
template <typename T, int N>
struct fast_tanh_op<Array<T, N>> {
CUTLASS_HOST_DEVICE
Array<T, N> operator()(Array<T, N> const &rhs) const {
fast_tanh_op<T> fast_op;
Array<T, N> y;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
y[i] = fast_op(rhs[i]);
}
return y;
}
};
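// Illustrative sketch (not part of the original header): applying the elementwise functor to a
// register-resident fragment. "apply_fast_tanh" is a hypothetical helper; for Array<half_t, N>
// on SM75+ with CUDA 11+, the specialization above lowers to the tanh.approx.f16x2 PTX path.
template <typename T, int N>
CUTLASS_HOST_DEVICE
Array<T, N> apply_fast_tanh(Array<T, N> const &frag) {
  fast_tanh_op<Array<T, N>> op;
  return op(frag);
}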
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Absolute value function
template <typename T>
CUTLASS_HOST_DEVICE
T absolute_value(T x) {
if (x < T()) {
return -x;
}
return x;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 26,026 | C | 25.667008 | 116 | 0.597249 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/relatively_equal.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Performs comparison between two elements with support for floating-point comparisons.
*/
#pragma once
#include "numeric_types.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
CUTLASS_HOST_DEVICE
bool relatively_equal(T a, T b, T epsilon, T nonzero_floor);
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace detail {
// This floating-point comparison function implements the method described in
//
// https://floating-point-gui.de/errors/comparison/
//
template <typename T>
CUTLASS_HOST_DEVICE
bool relatively_equal_float(T a, T b, T epsilon, T nonzero_floor) {
using std::abs;
T abs_A = abs(a);
T abs_B = abs(b);
T diff = abs(a - b);
T zero = T(0);
if (a == b) {
return true;
}
else if (a == zero || b == zero || diff < nonzero_floor) {
return diff < epsilon * nonzero_floor;
}
return diff < epsilon * (abs_A + abs_B);
}
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<uint1b_t>(uint1b_t a, uint1b_t b, uint1b_t, uint1b_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<int2b_t>(int2b_t a, int2b_t b, int2b_t, int2b_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<uint2b_t>(uint2b_t a, uint2b_t b, uint2b_t, uint2b_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<int4b_t>(int4b_t a, int4b_t b, int4b_t, int4b_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<uint4b_t>(uint4b_t a, uint4b_t b, uint4b_t, uint4b_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<int8_t>(int8_t a, int8_t b, int8_t, int8_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<uint8_t>(uint8_t a, uint8_t b, uint8_t, uint8_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<int16_t>(int16_t a, int16_t b, int16_t, int16_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<uint16_t>(uint16_t a, uint16_t b, uint16_t, uint16_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<int32_t>(int32_t a, int32_t b, int32_t, int32_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<uint32_t>(uint32_t a, uint32_t b, uint32_t, uint32_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<int64_t>(int64_t a, int64_t b, int64_t, int64_t) {
return (a == b);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<uint64_t>(uint64_t a, uint64_t b, uint64_t, uint64_t) {
return (a == b);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<half_t>(half_t a, half_t b, half_t epsilon, half_t nonzero_floor) {
return detail::relatively_equal_float(a, b, epsilon, nonzero_floor);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<bfloat16_t>(
bfloat16_t a,
bfloat16_t b,
bfloat16_t epsilon,
bfloat16_t nonzero_floor) {
return detail::relatively_equal_float(a, b, epsilon, nonzero_floor);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<tfloat32_t>(
tfloat32_t a,
tfloat32_t b,
tfloat32_t epsilon,
tfloat32_t nonzero_floor) {
return detail::relatively_equal_float(a, b, epsilon, nonzero_floor);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<float>(float a, float b, float epsilon, float nonzero_floor) {
return detail::relatively_equal_float(a, b, epsilon, nonzero_floor);
}
template <>
CUTLASS_HOST_DEVICE
bool relatively_equal<double>(double a, double b, double epsilon, double nonzero_floor) {
return detail::relatively_equal_float(a, b, epsilon, nonzero_floor);
}
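// Illustrative sketch (not part of the original header): a typical tolerance check; the epsilon
// and nonzero floor below are example values, not CUTLASS defaults.
CUTLASS_HOST_DEVICE
bool nearly_equal_f32_example(float a, float b) {
  return relatively_equal<float>(a, b, 1.0e-5f, 1.0e-30f);
}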
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| 5,929 | C | 28.068627 | 100 | 0.628436 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/wmma_array.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Statically sized array of WMMA fragments (nvcuda::wmma::fragment) that supports
           clearing and accumulating all fragments in place and is safe to use in a union.
*/
#pragma once
#include "cutlass/arch/wmma.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/functional.h"
namespace cutlass {
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Wmma array type (WmmaFragmentArray holds elements of type nvcuda::wmma::fragment)
template <
/// Element type
typename T,
/// Number of elements in the array
int N
>
class WmmaFragmentArray: public Array<T, N, true> {
public:
/// Efficient clear method (override Array::clear())
CUTLASS_HOST_DEVICE
void clear()
{
for(int i = 0; i < Array<T, N, true>::kElements; i++)
{
nvcuda::wmma::fill_fragment((*this)[i], (typename T::element_type)0);
}
}
CUTLASS_HOST_DEVICE
WmmaFragmentArray<T, N>& operator+=(const WmmaFragmentArray<T, N>& rhs)
{
using element_type = typename T::element_type;
plus<T> add;
for (int i = 0; i < Array<T, N, true>::kElements; i++)
{
(*this)[i] = add((*this)[i], rhs[i]);
}
return *this;
}
};
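// Illustrative sketch (not part of the original header): "clear_and_accumulate" is a hypothetical
// device-side helper showing the intended usage pattern for an array of accumulator fragments.
template <typename Fragment, int N>
CUTLASS_DEVICE
void clear_and_accumulate(WmmaFragmentArray<Fragment, N> &acc,
                          WmmaFragmentArray<Fragment, N> const &partial) {
  acc.clear();    // fills every fragment with zero via nvcuda::wmma::fill_fragment
  acc += partial; // per-fragment accumulation using the operator+= defined above
}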
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
#endif // if defined(CUTLASS_ARCH_WMMA_ENABLED)
| 3,359 | C | 34.74468 | 100 | 0.612087 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/integer_subbyte.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Defines a class for using integer types smaller than one byte in host or
device code.
*/
#pragma once
#if defined(__CUDACC_RTC__)
#include <cuda/std/cstdint>
#else
#include <cstdint>
#endif
#include "cutlass/platform/platform.h"
namespace cutlass {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Sub-byte integer type parameterized by bit width and signedness
template <int Bits, bool Signed = true>
struct integer_subbyte {
/// Number of bits
static int const kBits = Bits;
/// Whether type is signed
static bool const kSigned = Signed;
/// External type
using T = typename platform::conditional<kSigned, int, unsigned>::type;
/// Storage type
using Storage = uint8_t;
/// Bitmask used to truncate from larger integers
static Storage const kMask = Storage((1 << kBits) - 1);
//
// Data members
//
Storage storage;
//
// Methods
//
/// No operation
integer_subbyte() = default;
/// Conversion from integer type
CUTLASS_HOST_DEVICE
integer_subbyte(int value)
: storage(reinterpret_cast<Storage const &>(value) & kMask) {}
CUTLASS_HOST_DEVICE
integer_subbyte(unsigned value)
: storage(reinterpret_cast<Storage const &>(value) & kMask) {}
CUTLASS_HOST_DEVICE
integer_subbyte(double value) {
T tmp = static_cast<T>(value);
storage = Storage(reinterpret_cast<unsigned const &>(tmp) & kMask);
}
  /// Conversion to the external integer type; sign-extends negative values when the type is signed
CUTLASS_HOST_DEVICE
operator T() const {
if (kSigned) {
// Sign extend
if (storage & Storage(1 << (kBits - 1))) {
return T(storage) | ~T(kMask);
}
}
return T(storage);
}
/// Equality
CUTLASS_HOST_DEVICE
bool operator==(integer_subbyte const &rhs) const {
return storage == rhs.storage;
}
/// Inequality
CUTLASS_HOST_DEVICE
bool operator!=(integer_subbyte const &rhs) const {
return storage != rhs.storage;
}
/// Less than or equal
CUTLASS_HOST_DEVICE
bool operator<=(integer_subbyte const &rhs) const {
if (kSigned) {
if (storage & (1 << (kBits - 1))) {
return !(rhs.storage < storage);
}
}
return storage < rhs.storage;
}
/// Less than
CUTLASS_HOST_DEVICE
bool operator<(integer_subbyte const &rhs) const {
if (kSigned) {
if (storage & (1 << (kBits - 1))) {
return !(rhs.storage <= storage);
}
}
return storage < rhs.storage;
}
/// Greater than or equal
CUTLASS_HOST_DEVICE
bool operator>=(integer_subbyte const &rhs) const {
return !(*this < rhs);
}
/// Greater than
CUTLASS_HOST_DEVICE
bool operator>(integer_subbyte const &rhs) const {
return !(*this <= rhs);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
/// 1-bit unsigned integer type
using uint1b_t = integer_subbyte<1, false>;
/// 2-bit signed integer type
using int2b_t = integer_subbyte<2, true>;
/// 2-bit unsigned integer type
using uint2b_t = integer_subbyte<2, false>;
/// 4-bit signed integer type
using int4b_t = integer_subbyte<4, true>;
/// 4-bit unsigned integer type
using uint4b_t = integer_subbyte<4, false>;
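// Illustrative note (not part of the original header): each sub-byte value occupies a full byte
// of storage; construction masks to kBits and conversion back to int sign-extends, so on the
// little-endian targets CUDA supports, int4b_t(9) reads back as -7.
static_assert(sizeof(int4b_t) == 1, "sub-byte types are stored in a single byte");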
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines the size of an element in bits - specialized for uint1b_t
template <>
struct sizeof_bits<uint1b_t> {
static int const value = 1;
};
/// Defines the size of an element in bits - specialized for int2b_t
template <>
struct sizeof_bits<int2b_t> {
static int const value = 2;
};
/// Defines the size of an element in bits - specialized for uint2b_t
template <>
struct sizeof_bits<uint2b_t> {
static int const value = 2;
};
/// Defines the size of an element in bits - specialized for int4b_t
template <>
struct sizeof_bits<int4b_t> {
static int const value = 4;
};
/// Defines the size of an element in bits - specialized for uint4b_t
template <>
struct sizeof_bits<uint4b_t> {
static int const value = 4;
};
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace platform {
template <>
struct numeric_limits<cutlass::int4b_t> {
CUTLASS_HOST_DEVICE
static cutlass::int4b_t const lowest() noexcept { return -8;}
CUTLASS_HOST_DEVICE
static cutlass::int4b_t const max() noexcept { return 7;}
static constexpr bool is_integer = true;
};
template <>
struct numeric_limits<cutlass::uint4b_t> {
CUTLASS_HOST_DEVICE
static cutlass::uint4b_t const lowest() noexcept { return 0;}
CUTLASS_HOST_DEVICE
static cutlass::uint4b_t const max() noexcept { return 15;}
static constexpr bool is_integer = true;
};
template <>
struct numeric_limits<cutlass::uint1b_t> {
CUTLASS_HOST_DEVICE
static cutlass::uint1b_t const lowest() noexcept { return 0;}
CUTLASS_HOST_DEVICE
static cutlass::uint1b_t const max() noexcept { return 1;}
static constexpr bool is_integer = true;
};
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace platform
} // namespace cutlass
| 6,893 | C | 27.605809 | 100 | 0.621645 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/quaternion.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines a densely packed quaternion object intended for storing data in registers and
executing quaternion operations within a CUDA or host thread.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/functional.h"
#include "cutlass/array.h"
#include "cutlass/real.h"
#include "cutlass/coord.h"
#include "cutlass/matrix.h"
#include "cutlass/fast_math.h"
#include "cutlass/layout/vector.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Quaternion: xi + yj + zk + w
template <
typename Element_ = float ///< element type
>
class Quaternion : public Array<Element_, 4> {
public:
/// Logical rank of tensor index space
static int const kRank = 1;
/// Number of elements
static int const kExtent = 4;
/// Base class is a four-element array
using Base = Array<Element_, kExtent>;
/// Element type
using Element = typename Base::Element;
/// Reference type to an element
using Reference = typename Base::reference;
/// Index type
using Index = int;
/// Quaternion storage - imaginary part
static int const kX = 0;
/// Quaternion storage - imaginary part
static int const kY = 1;
/// Quaternion storage - imaginary part
static int const kZ = 2;
/// Quaternion storage - real part
static int const kW = 3;
public:
//
// Methods
//
/// Constructs a quaternion q = 0
CUTLASS_HOST_DEVICE
Quaternion() {
Base::at(kX) = Element();
Base::at(kY) = Element();
Base::at(kZ) = Element();
Base::at(kW) = Element();
}
/// Constructs a quaternion q = w + 0*i + 0*j + 0*k
CUTLASS_HOST_DEVICE
Quaternion(
Element w_
) {
Base::at(kX) = Element();
Base::at(kY) = Element();
Base::at(kZ) = Element();
Base::at(kW) = w_;
}
/// Constructs a quaternion q = w + x*i + y*j + z*k
CUTLASS_HOST_DEVICE
Quaternion(
Element x_,
Element y_,
Element z_,
Element w_
) {
Base::at(kX) = x_;
Base::at(kY) = y_;
Base::at(kZ) = z_;
Base::at(kW) = w_;
}
/// Constructs a quaternion from a vector representing the imaginary part and a real number
CUTLASS_HOST_DEVICE
Quaternion(
Matrix3x1<Element> const &imag_,
Element w_ = Element()
) {
Base::at(kX) = imag_[0];
Base::at(kY) = imag_[1];
Base::at(kZ) = imag_[2];
Base::at(kW) = w_;
}
/// Returns a reference to the element at a given Coord
CUTLASS_HOST_DEVICE
Reference at(Index idx) const {
return Base::at(idx);
}
/// Returns a reference to the element at a given Coord
CUTLASS_HOST_DEVICE
Reference at(Index idx) {
return Base::at(idx);
}
/// Accesses the x element of the imaginary part of the quaternion
CUTLASS_HOST_DEVICE
Element x() const {
return Base::at(kX);
}
/// Accesses the x element of the imaginary part of the quaternion
CUTLASS_HOST_DEVICE
Reference x() {
return Base::at(kX);
}
/// Accesses the y element of the imaginary part of the quaternion
CUTLASS_HOST_DEVICE
Element y() const {
return Base::at(kY);
}
/// Accesses the y element of the imaginary part of the quaternion
CUTLASS_HOST_DEVICE
Reference y() {
return Base::at(kY);
}
/// Accesses the z element of the imaginary part of the quaternion
CUTLASS_HOST_DEVICE
Element z() const {
return Base::at(kZ);
}
/// Accesses the z element of the imaginary part of the quaternion
CUTLASS_HOST_DEVICE
Reference z() {
return Base::at(kZ);
}
/// Accesses the real part of the quaternion
CUTLASS_HOST_DEVICE
Element w() const {
return Base::at(kW);
}
/// Accesses the real part of the quaternion
CUTLASS_HOST_DEVICE
Reference w() {
return Base::at(kW);
}
/// Returns the pure imaginary part of the quaternion as a 3-vector
CUTLASS_HOST_DEVICE
Matrix3x1<Element> pure() const {
return Matrix3x1<Element>(x(), y(), z());
}
/// Returns a quaternion representation of a spatial rotation given a unit-length axis and
/// a rotation in radians.
CUTLASS_HOST_DEVICE
static Quaternion<Element> rotation(
Matrix3x1<Element> const &axis_unit, ///< axis of rotation (assumed to be unit length)
Element theta) { ///< angular rotation in radians
Element s = fast_sin(theta / Element(2));
return Quaternion(
s * axis_unit[0],
s * axis_unit[1],
s * axis_unit[2],
fast_cos(theta / Element(2))
);
}
/// Returns a quaternion representation of a spatial rotation represented as a
/// unit-length rotation axis (r_x, r_y, r_z) and an angular rotation in radians
CUTLASS_HOST_DEVICE
static Quaternion<Element> rotation(
Element r_x,
Element r_y,
Element r_z,
Element theta) { ///< angular rotation in radians
return rotation({r_x, r_y, r_z}, theta);
}
/// Geometric rotation of a 3-element vector
CUTLASS_HOST_DEVICE
Matrix3x1<Element> rotate(Matrix3x1<Element> const &rhs) const {
return (*this * Quaternion<Element>(rhs, 0) * reciprocal(*this)).pure();
}
/// Inverse rotation operation
CUTLASS_HOST_DEVICE
Matrix3x1<Element> rotate_inv(Matrix3x1<Element> const &rhs) const {
return (reciprocal(*this) * Quaternion<Element>(rhs, 0) * *this).pure();
}
/// Rotates a 3-vector assuming this is a unit quaternion (a spinor)
CUTLASS_HOST_DEVICE
Matrix3x1<Element> spinor(Matrix3x1<Element> const &rhs) const {
return (*this * Quaternion<Element>(rhs, 0) * conj(*this)).pure();
}
/// Inverse rotation of 3-vector assuming this is a unit quaternion (a spinor)
CUTLASS_HOST_DEVICE
Matrix3x1<Element> spinor_inv(Matrix3x1<Element> const &rhs) const {
return (conj(*this) * Quaternion<Element>(rhs, 0) * *this).pure();
}
/// In-place addition
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> &operator+=(Quaternion<Element> const &rhs) {
*this = (*this + rhs);
return *this;
}
/// In-place subtraction
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> &operator-=(Quaternion<Element> const &rhs) {
*this = (*this - rhs);
return *this;
}
/// In-place multiplication
template <typename T>
CUTLASS_HOST_DEVICE
Quaternion<Element> &operator*=(Quaternion<Element> const &rhs) {
*this = (*this * rhs);
return *this;
}
/// Scalar multiplication
template <typename T>
CUTLASS_HOST_DEVICE
Quaternion<Element> &operator*=(Element s) {
*this = (*this * s);
return *this;
}
/// In-place Division
template <typename T>
CUTLASS_HOST_DEVICE
Quaternion<Element> &operator/=(Quaternion<Element> const &rhs) {
*this = (*this / rhs);
return *this;
}
/// In-place Division
template <typename T>
CUTLASS_HOST_DEVICE
Quaternion<Element> &operator/=(Element s) {
*this = (*this / s);
return *this;
}
/// Computes a 3x3 rotation matrix (row-major representation)
CUTLASS_HOST_DEVICE
Matrix3x3<Element> as_rotation_matrix_3x3() const {
Matrix3x3<Element> m(
w() * w() + x() * x() - y() * y() - z() * z(),
2 * x() * y() - 2 * w() * z(),
2 * x() * z() + 2 * w() * y(),
2 * x() * y() + 2 * w() * z(),
w() * w() - x() * x() + y() * y() - z() * z(),
2 * y() * z() - 2 * w() * x(),
2 * x() * z() - 2 * w() * y(),
2 * y() * z() + 2 * w() * x(),
w() * w() - x() * x() - y() * y() + z() * z()
);
return m;
}
/// Computes a 4x4 rotation matrix (row-major representation)
CUTLASS_HOST_DEVICE
Matrix4x4<Element> as_rotation_matrix_4x4() const {
Matrix4x4<Element> m = Matrix4x4<Element>::identity();
m.set_slice_3x3(as_rotation_matrix_3x3());
return m;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Constructs a quaternion that is non-zero only in its real element.
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> make_Quaternion(
Element w) { ///< real part
return Quaternion<Element>(w);
}
/// Constructs a quaternion from a vector and real
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> make_Quaternion(
  Matrix3x1<Element> const &imag,           ///< imaginary part as a vector
Element w) { ///< real part
return Quaternion<Element>(imag, w);
}
/// Constructs a quaternion from a unit-length rotation axis and a rotation
/// angle in radians
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> make_QuaternionRotation(
Matrix3x1<Element> const &axis_unit, ///< rotation axis (unit-length)
Element w) { ///< rotation angle in radians
return Quaternion<Element>::rotation(axis_unit, w);
}
/// Constructs a quaternion q = xi + yj + zk + w
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> make_Quaternion(Element x, Element y, Element z, Element w) {
return Quaternion<Element>(x, y, z, w);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns the real part of the quaternion number
template <typename Element>
CUTLASS_HOST_DEVICE
Element const &real(Quaternion<Element> const &q) {
return q.w();
}
/// Returns the real part of the quaternion number
template <typename Element>
CUTLASS_HOST_DEVICE
Element &real(Quaternion<Element> &q) {
return q.w();
}
/// Returns the magnitude of the quaternion number
template <typename Element>
CUTLASS_HOST_DEVICE
Element abs(Quaternion<Element> const &q) {
return fast_sqrt(norm(q));
}
/// Quaternion conjugate
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> conj(Quaternion<Element> const &q) {
return make_Quaternion(
-q.x(),
-q.y(),
-q.z(),
q.w()
);
}
/// Computes the squared magnitude of the quaternion
template <typename Element>
CUTLASS_HOST_DEVICE
Element norm(Quaternion<Element> const &q) {
return q.x() * q.x() + q.y() * q.y() + q.z() * q.z() + q.w() * q.w();
}
/// Quaternion reciprocal
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> reciprocal(Quaternion<Element> const &q) {
Element nsq = norm(q);
return make_Quaternion(
-q.x() / nsq,
-q.y() / nsq,
-q.z() / nsq,
q.w() / nsq
);
}
/// Returns a unit-length quaternion
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> unit(Quaternion<Element> const &q) {
Element rcp_mag = Element(1) / abs(q);
return make_Quaternion(
q.x() * rcp_mag,
q.y() * rcp_mag,
q.z() * rcp_mag,
q.w() * rcp_mag
);
}
/// Quaternion exponential
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> exp(Quaternion<Element> const &q) {
Element exp_ = fast_exp(q.w());
Element imag_norm = fast_sqrt(q.x() * q.x() + q.y() * q.y() + q.z() * q.z());
Element sin_norm = fast_sin(imag_norm);
return make_Quaternion(
exp_ * q.x() * sin_norm / imag_norm,
exp_ * q.y() * sin_norm / imag_norm,
exp_ * q.z() * sin_norm / imag_norm,
exp_ * fast_cos(imag_norm)
);
}
/// Quaternion natural logarithm
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> log(Quaternion<Element> const &q) {
Element v = fast_sqrt(q.x() * q.x() + q.y() * q.y() + q.z() * q.z());
Element s = fast_acos(q.w() / abs(q)) / v;
return make_Quaternion(
q.x() * s,
q.y() * s,
q.z() * s,
fast_log(q.w())
);
}
/// Gets the rotation angle from a unit-length quaternion
template <typename Element>
CUTLASS_HOST_DEVICE
Element get_rotation_angle(Quaternion<Element> const &q_unit) {
return fast_acos(q_unit.w()) * Element(2);
}
/// Gets the rotation axis from a unit-length quaternion
template <typename Element>
CUTLASS_HOST_DEVICE
Matrix3x1<Element> get_rotation_axis(Quaternion<Element> const &q_unit) {
return q_unit.pure().unit();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Equality operator
template <typename Element>
CUTLASS_HOST_DEVICE
bool operator==(Quaternion<Element> const &lhs, Quaternion<Element> const &rhs) {
return lhs.x() == rhs.x() &&
lhs.y() == rhs.y() &&
lhs.z() == rhs.z() &&
lhs.w() == rhs.w();
}
/// Inequality operator
template <typename Element>
CUTLASS_HOST_DEVICE
bool operator!=(Quaternion<Element> const &lhs, Quaternion<Element> const &rhs) {
return !(lhs == rhs);
}
/// Quaternion scalar multiplication
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator*(Quaternion<Element> q, Element s) {
return make_Quaternion(
q.x() * s,
q.y() * s,
q.z() * s,
q.w() * s
);
}
/// Quaternion scalar multiplication
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator*(Element s, Quaternion<Element> const &q) {
return make_Quaternion(
s * q.x(),
s * q.y(),
s * q.z(),
s * q.w()
);
}
/// Quaternion scalar division
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator/(Quaternion<Element> const &q, Element s) {
return make_Quaternion(
q.x() / s,
q.y() / s,
q.z() / s,
q.w() / s
);
}
/// Quaternion unary negation
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator-(Quaternion<Element> const &q) {
return make_Quaternion(
-q.x(),
-q.y(),
-q.z(),
-q.w()
);
}
/// Quaternion addition
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator+(Quaternion<Element> const &lhs, Quaternion<Element> const &rhs) {
return make_Quaternion(
lhs.x() + rhs.x(),
lhs.y() + rhs.y(),
lhs.z() + rhs.z(),
lhs.w() + rhs.w()
);
}
/// Quaternion subtraction
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator-(Quaternion<Element> const &lhs, Quaternion<Element> const &rhs) {
return make_Quaternion(
lhs.x() - rhs.x(),
lhs.y() - rhs.y(),
lhs.z() - rhs.z(),
lhs.w() - rhs.w()
);
}
/// Quaternion product
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator*(Quaternion<Element> const &lhs, Quaternion<Element> const &rhs) {
return make_Quaternion(
lhs.w() * rhs.x() + rhs.w() * lhs.x() + lhs.y() * rhs.z() - lhs.z() * rhs.y(),
lhs.w() * rhs.y() + rhs.w() * lhs.y() + lhs.z() * rhs.x() - lhs.x() * rhs.z(),
lhs.w() * rhs.z() + rhs.w() * lhs.z() + lhs.x() * rhs.y() - lhs.y() * rhs.x(),
lhs.w() * rhs.w() - lhs.x() * rhs.x() - lhs.y() * rhs.y() - lhs.z() * rhs.z()
);
}
/// Quaternion division
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator/(Quaternion<Element> const &lhs, Quaternion<Element> const &rhs) {
return lhs * reciprocal(rhs);
}
/// Quaternion scalar division
template <typename Element>
CUTLASS_HOST_DEVICE
Quaternion<Element> operator/(Element s, Quaternion<Element> const &q) {
return s * reciprocal(q);
}
/// Comparison
template <typename Element>
CUTLASS_HOST_DEVICE
bool operator<(Quaternion<Element> const &lhs, Quaternion<Element> const &rhs) {
//TODO
return true;
}
/// Rotates a 3-vector assuming this is a unit quaternion (a spinor). This avoids computing
/// a reciprocal.
template <typename Element>
CUTLASS_HOST_DEVICE
Matrix3x1<Element> spinor_rotation(
Quaternion<Element> const &spinor, /// unit-length quaternion
Matrix3x1<Element> const &rhs) { /// arbitrary 3-vector
return (spinor * Quaternion<Element>(rhs, 0) * conj(spinor)).pure();
}
/// Inverse rotation of 3-vector assuming this is a unit quaternion (a spinor). This avoids computing
/// a reciprocal.
template <typename Element>
CUTLASS_HOST_DEVICE
Matrix3x1<Element> spinor_rotation_inv(
Quaternion<Element> const &spinor, /// unit-length quaternion
Matrix3x1<Element> const &rhs) { /// arbitrary 3-vector
return (conj(spinor) * Quaternion<Element>(rhs, 0) * spinor).pure();
}
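// Illustrative sketch (not part of the original header): rotating a 3-vector by pi/2 about +Z.
// "rotate_z_90_example" is a hypothetical helper, not a CUTLASS API.
CUTLASS_HOST_DEVICE
Matrix3x1<float> rotate_z_90_example(Matrix3x1<float> const &v) {
  // unit-length axis (0, 0, 1) and an angle of pi/2 radians yield a unit quaternion
  Quaternion<float> q = Quaternion<float>::rotation(0.0f, 0.0f, 1.0f, 1.57079632679f);
  return spinor_rotation(q, v); // valid because q is unit length; avoids computing a reciprocal
}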
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Quaternion-valued type.
template <typename T>
struct RealType< Quaternion<T> > {
using Type = T;
/// Number of elements
static int const kExtent = Quaternion<T>::kExtent;
CUTLASS_HOST_DEVICE
static Quaternion<T> from_real(double x) {
return Quaternion<T>(static_cast<T>(x));
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// Factories
////////////////////////////////////////////////////////////////////////////////////////////////////
template <>
CUTLASS_HOST_DEVICE
cutlass::Quaternion<half_t> from_real<cutlass::Quaternion<half_t> >(double r) {
return cutlass::Quaternion<half_t>(half_t(r));
}
template <>
CUTLASS_HOST_DEVICE
cutlass::Quaternion<float> from_real<cutlass::Quaternion<float> >(double r) {
return cutlass::Quaternion<float>(float(r));
}
template <>
CUTLASS_HOST_DEVICE
cutlass::Quaternion<double> from_real<cutlass::Quaternion<double> >(double r) {
return cutlass::Quaternion<double>(r);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
// functional.h numeric specializations
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
struct multiplies<Quaternion<T>> {
CUTLASS_HOST_DEVICE
Quaternion<T> operator()(Quaternion<T> lhs, Quaternion<T> const &rhs) const {
lhs = lhs * rhs;
return lhs;
}
};
/// Squares with optional conversion
template <typename T, typename Output>
struct magnitude_squared<Quaternion<T>, Output> {
CUTLASS_HOST_DEVICE
Output operator()(Quaternion<T> lhs) const {
multiplies<Output> mul_op;
Output y_w = Output(lhs.w());
Output y_x = Output(lhs.x());
Output y_y = Output(lhs.y());
Output y_z = Output(lhs.z());
return mul_op(y_w, y_w) + mul_op(y_x, y_x) + mul_op(y_y, y_y) + \
mul_op(y_z, y_z);
}
};
template <typename T>
struct multiply_add<Quaternion<T>, Quaternion<T>, Quaternion<T>> {
CUTLASS_HOST_DEVICE
Quaternion<T> operator()(
Quaternion<T> const &a,
Quaternion<T> const &b,
Quaternion<T> const &c) const {
T x = c.x();
T y = c.y();
T z = c.z();
T w = c.w();
x += a.w() * b.x();
x += b.w() * a.x();
x += a.y() * b.z();
    x += -a.z() * b.y();
y += a.w() * b.y();
y += b.w() * a.y();
y += a.z() * b.x();
y += -a.x() * b.z();
z += a.w() * b.z();
z += b.w() * a.z();
z += a.x() * b.y();
z += -a.y() * b.x();
w += a.w() * b.w();
w += -a.x() * b.x();
w += -a.y() * b.y();
w += -a.z() * b.z();
return cutlass::make_Quaternion(x, y, z, w);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 20,901 | C | 26.684768 | 101 | 0.599301 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/numeric_conversion.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*!
\file
\brief Boost-like numeric conversion operator for CUTLASS numeric types
*/
#pragma once
#if !defined(__CUDACC_RTC__)
#include <cfenv>
#endif
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/transform/thread/unary_op.h"
#include "cutlass/array.h"
#include "cutlass/half.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Floating-point rounding style similare to Standard Library's formats but supporting
/// additional rounding options.
enum class FloatRoundStyle {
round_indeterminate, ///< rounding mode unknown
round_toward_zero, ///< round toward zero
round_to_nearest, ///< round to nearest even
round_toward_infinity, ///< round toward infinity
round_toward_neg_infinity, ///< round toward negative infinity
round_half_ulp_truncate, ///< add 0.5ulp to integer representation then round toward zero
round_half_ulp_trunc_dntz ///< like round_half_ulp_truncate, except denorms are rounded *toward* zero
};
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename T,
typename S,
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest
>
struct NumericConverter {
using result_type = T;
using source_type = S;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
return static_cast<result_type>(s);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for float => int32_t
//
/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(__CUDA_ARCH__)
template <>
struct NumericConverter<int32_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = int32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_DEVICE
static result_type convert(source_type const & s) {
return __float2int_rn(s);
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
template <>
struct NumericConverter<int32_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = int32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
CUTLASS_DEVICE
static result_type convert(source_type const & s) {
return __float2int_rz(s);
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
#elif !defined(__CUDACC_RTC__)
template <>
struct NumericConverter<int32_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = int32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
static result_type convert(source_type const & s) {
std::fesetround(FE_TONEAREST);
return (result_type)std::nearbyint(s);
}
result_type operator()(source_type const &s) {
return convert(s);
}
};
template <>
struct NumericConverter<int32_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = int32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
static result_type convert(source_type const & s) {
std::fesetround(FE_TOWARDZERO);
return (result_type)std::nearbyint(s);
}
result_type operator()(source_type const &s) {
return convert(s);
}
};
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for float => int8_t
//
/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(__CUDA_ARCH__)
template <>
struct NumericConverter<int8_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = int8_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_DEVICE
static result_type convert(source_type const & s) {
int32_t intermediate;
asm volatile("cvt.rni.sat.s8.f32 %0, %1;" : "=r"(intermediate) : "f"(s));
return static_cast<result_type>(intermediate);
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
template <>
struct NumericConverter<int8_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = int8_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
CUTLASS_DEVICE
static result_type convert(source_type const & s) {
int32_t intermediate;
asm volatile("cvt.rzi.sat.s8.f32 %0, %1;" : "=r"(intermediate) : "f"(s));
return static_cast<result_type>(intermediate);
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
#elif !defined(__CUDACC_RTC__)
template <>
struct NumericConverter<int8_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = int8_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
static result_type convert(source_type const & s) {
std::fesetround(FE_TONEAREST);
int32_t intermediate = (int32_t)std::nearbyint(s);
// Low-end saturation
intermediate = std::max(intermediate, (int32_t)std::numeric_limits<int8_t>::lowest());
// High-end saturation
intermediate = std::min(intermediate, (int32_t)std::numeric_limits<int8_t>::max());
return static_cast<result_type>(intermediate);
}
result_type operator()(source_type const &s) {
return convert(s);
}
};
template <>
struct NumericConverter<int8_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = int8_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
static result_type convert(source_type const & s) {
std::fesetround(FE_TOWARDZERO);
int32_t intermediate = (int32_t)std::nearbyint(s);
// Low-end saturation
intermediate = std::max(intermediate, (int32_t)std::numeric_limits<int8_t>::lowest());
// High-end saturation
intermediate = std::min(intermediate, (int32_t)std::numeric_limits<int8_t>::max());
return static_cast<result_type>(intermediate);
}
result_type operator()(source_type const &s) {
return convert(s);
}
};
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for the identity conversion (source and result types match)
template <typename T, FloatRoundStyle Round>
struct NumericConverter<T, T, Round> {
using result_type = T;
using source_type = T;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
return s;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for float <=> half_t
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for float <= half_t
template <FloatRoundStyle Round>
struct NumericConverter<float, half_t, Round> {
using result_type = float;
using source_type = half_t;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
result_type result = static_cast<float>(s);
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Specialization for round-to-nearest
template <>
struct NumericConverter<half_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = half_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
result_type result = static_cast<half_t>(s);
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Specialization for round-toward-zero
template <>
struct NumericConverter<half_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = half_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
/// Round toward zero
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & flt) {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
return half_t(__float2half_rz(flt));
#else
    // software implementation truncates the mantissa (rounds toward zero)
unsigned const& s = reinterpret_cast<unsigned const &>(flt);
uint16_t sign = uint16_t((s >> 16) & 0x8000);
int16_t exp = uint16_t(((s >> 23) & 0xff) - 127);
int mantissa = s & 0x7fffff;
uint16_t u = 0;
if ((s & 0x7fffffff) == 0) {
// sign-preserving zero
return half_t::bitcast(sign);
}
if (exp > 15) {
if (exp == 128 && mantissa) {
// not a number
u = 0x7fff;
} else {
// overflow to infinity
u = sign | 0x7c00;
}
return half_t::bitcast(u);
}
if (exp >= -14) {
// normal fp32 to normal fp16
exp = uint16_t(exp + uint16_t(15));
u = uint16_t(((exp & 0x1f) << 10));
u = uint16_t(u | (mantissa >> 13));
} else {
      // normal single-precision to subnormal half-precision representation
int rshift = (-14 - exp);
if (rshift < 32) {
mantissa |= (1 << 23);
mantissa = (mantissa >> rshift);
u = (uint16_t(mantissa >> 13) & 0x3ff);
} else {
mantissa = 0;
u = 0;
}
}
u |= sign;
return half_t::bitcast(u);
#endif // defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for float <=> bfloat16_t
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for float <= bfloat16_t
template <FloatRoundStyle Round>
struct NumericConverter<float, bfloat16_t, Round> {
using result_type = float;
using source_type = bfloat16_t;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
return static_cast<float>(s);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
template <>
struct NumericConverter<bfloat16_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = bfloat16_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
return static_cast<bfloat16_t>(s);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
template <>
struct NumericConverter<bfloat16_t, float, FloatRoundStyle::round_half_ulp_truncate> {
using result_type = bfloat16_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_half_ulp_truncate;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
uint32_t x32 = reinterpret_cast<uint32_t const &>(s);
#if defined(__CUDA_ARCH__)
if (::isfinite(s)) {
x32 += 0x8000;
}
#else
if (std::isfinite(s)) {
x32 += 0x8000;
}
#endif
uint16_t x16 = uint16_t((x32 >> 16) & 0xffff);
return bfloat16_t::bitcast(x16);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
template <>
struct NumericConverter<bfloat16_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = bfloat16_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
uint32_t x32 = reinterpret_cast<uint32_t const &>(s);
uint16_t x16 = uint16_t(x32 >> 16);
return bfloat16_t::bitcast(x16);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
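// Illustrative sketch (not part of the original header): the round-toward-zero path above keeps
// only the upper 16 bits of the float encoding. "truncate_to_bf16_example" is a hypothetical helper.
CUTLASS_HOST_DEVICE
bfloat16_t truncate_to_bf16_example(float x) {
  NumericConverter<bfloat16_t, float, FloatRoundStyle::round_toward_zero> convert_op;
  return convert_op(x);
}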
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for float <=> tfloat32_t
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for float <= tfloat32_t
template <FloatRoundStyle Round>
struct NumericConverter<float, tfloat32_t, Round> {
using result_type = float;
using source_type = tfloat32_t;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
return static_cast<float>(s);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
template <>
struct NumericConverter<tfloat32_t, float, FloatRoundStyle::round_to_nearest> {
using result_type = tfloat32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
unsigned storage = reinterpret_cast<unsigned const &>(s);
if ((storage & 0x7f800000) != 0x7f800000) {
bool mantissa_bit = ((storage & (1 << 13)) != 0);
bool round_bit = ((storage & (1 << 12)) != 0);
bool sticky_bit = ((storage & ((1 << 12) - 1)) != 0);
if ((round_bit && sticky_bit) || (round_bit && mantissa_bit)) {
storage += uint32_t(1 << 13);
}
// Note, the following is intentionally commented out. TF32
// does not define the low order bits, so they may be left in
// an undefined state.
//
      // By not truncating these bits explicitly, we avoid an extra logical
// operation.
//
// TF32 may be implicitly converted to float by performing this
// operation as needed.
//
// storage = (storage & ~0x1fff);
}
else if (storage & ~0xff800000) {
storage = 0x7fffffff;
}
return tfloat32_t::bitcast(storage);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
template <>
struct NumericConverter<tfloat32_t, float, FloatRoundStyle::round_half_ulp_truncate> {
using result_type = tfloat32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_half_ulp_truncate;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
return tfloat32_t::round_half_ulp_truncate(s);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// This rounding operation is similar to half_ulp_truncate except it rounds denorms toward zero.
/// It avoids predicated code, though it requires a temporary register.
template <>
struct NumericConverter<tfloat32_t, float, FloatRoundStyle::round_half_ulp_trunc_dntz> {
using result_type = tfloat32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_half_ulp_trunc_dntz;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
unsigned y = reinterpret_cast<unsigned const &>(s);
y = y & 0xff800000;
float d = reinterpret_cast<float const &>(y);
float z = d / float(1 << 11) + s;
return reinterpret_cast<result_type const &>(z);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
template <>
struct NumericConverter<tfloat32_t, float, FloatRoundStyle::round_toward_zero> {
using result_type = tfloat32_t;
using source_type = float;
static FloatRoundStyle const round_style = FloatRoundStyle::round_toward_zero;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
uint32_t x = reinterpret_cast<uint32_t const &>(s);
return tfloat32_t::bitcast(x & 0xffffe000);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Conversion operator for float to tfloat32_t big and small values
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
FloatRoundStyle RoundBig = FloatRoundStyle::round_toward_zero,
FloatRoundStyle RoundSmall = FloatRoundStyle::round_half_ulp_truncate
>
struct NumericConverterFastF32 {
// result_type holds big tfloat32_t at idx(0) and small tfloat32_t at idx(1)
using result_type = Array<tfloat32_t, 2>;
// source data type
using source_type = float;
// rounding styles for big and small part
static FloatRoundStyle const kRoundBig = RoundBig;
static FloatRoundStyle const kRoundSmall = RoundSmall;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
result_type result;
NumericConverter<tfloat32_t, float, kRoundBig> convert_big_;
NumericConverter<tfloat32_t, float, kRoundSmall> convert_small_;
// convert and fill tfloat32_t big at idx 0
result[0] = convert_big_(source);
// convert and fill tfloat32_t small at idx 1
result[1] = convert_small_(source - static_cast<float>(result[0]));
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
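/// Example (illustrative usage sketch): splitting a float into its "big" and
/// "small" tfloat32_t halves so that their sum approximates the original value.
///
///   NumericConverterFastF32<> split;
///   Array<tfloat32_t, 2> parts = split(1.234567f);
///
///   // static_cast<float>(parts[0]) + static_cast<float>(parts[1]) ~= 1.234567f
///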
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Conversion and Clamp operator for Integers
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename T,
typename S
>
struct NumericConverterClamp {
using result_type = T;
using source_type = S;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
NumericConverter<result_type, source_type> convert_op;
result_type const kClamp_max = platform::numeric_limits<result_type>::max();
result_type const kClamp_min = platform::numeric_limits<result_type>::lowest();
if (s < (source_type)kClamp_min)
return kClamp_min;
if (s > (source_type)kClamp_max)
return kClamp_max;
return convert_op(s);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
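/// Example (illustrative usage sketch): out-of-range sources saturate at the
/// limits of the destination type before conversion.
///
///   NumericConverterClamp<int8_t, float> clamp_convert;
///
///   int8_t a = clamp_convert(300.0f);    // saturates to 127
///   int8_t b = clamp_convert(-1000.0f);  // saturates to -128
///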
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Conversion operator for Array
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Conversion operator for Array
template <
typename T,
typename S,
int N,
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest,
typename Transform = cutlass::transform::thread::UnaryTransform::Identity
>
struct NumericArrayConverter {
using result_type = Array<T, N>;
using source_type = Array<S, N>;
static FloatRoundStyle const round_style = Round;
static_assert(platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Identity>::value ||
platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Conjugate>::value,
"Unary Operator not supported.");
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & s) {
result_type result;
NumericConverter<T, S, Round> convert_;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
if( platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Identity>::value )
{
result[i] = convert_(s[i]);
} else { // conjugate
result[i] = conj(convert_(s[i]));
}
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
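/// Example (illustrative usage sketch): element-wise conversion of a packed Array.
///
///   Array<float, 4> src;                        // ... populated elsewhere
///
///   NumericArrayConverter<half_t, float, 4> to_half;
///   Array<half_t, 4> dst = to_half(src);        // converts each lane
///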
template <
typename T,
int N,
FloatRoundStyle Round,
typename Transform
>
struct NumericArrayConverter<T, T, N, Round, Transform> {
using result_type = Array<T, N>;
using source_type = Array<T, N>;
static FloatRoundStyle const round_style = Round;
static_assert(platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Identity>::value ||
platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Conjugate>::value,
"Unary Operator not supported.");
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
if( platform::is_same<Transform, cutlass::transform::thread::UnaryTransform::Identity>::value )
{
return s;
} else {
result_type result;
for (int i = 0; i < N; ++i) {
result[i] = conj(s[i]);
}
return result;
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<half, 2> <= Array<float, 2>, round to nearest
template <>
struct NumericArrayConverter<half_t, float, 2, FloatRoundStyle::round_to_nearest> {
using result_type = Array<half_t, 2>;
using source_type = Array<float, 2>;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
Array<half_t, 2> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
reinterpret_cast<__half2 &>(result) = __float22half2_rn(reinterpret_cast<float2 const &>(source));
#else
NumericConverter<half_t, float, round_style> convert_;
result[0] = convert_(source[0]);
result[1] = convert_(source[1]);
#endif
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Partial specialization for Array<float, 2> <= Array<half_t, 2>, round to nearest
template <FloatRoundStyle Round>
struct NumericArrayConverter<float, half_t, 2, Round> {
using result_type = Array<float, 2>;
using source_type = Array<half_t, 2>;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
Array<float, 2> result;
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
reinterpret_cast<float2 &>(result) = __half22float2(reinterpret_cast<__half2 const &>(source));
#else
NumericConverter<float, half_t, round_style> convert_;
result[0] = convert_(source[0]);
result[1] = convert_(source[1]);
#endif
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<half> <= Array<float>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<half_t, float, N, Round> {
using result_type = Array<half_t, N>;
using source_type = Array<float, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayConverter<half_t, float, 2, Round> convert_vector_;
NumericConverter<half_t, float, Round> convert_element_;
result_type result;
Array<half_t, 2> *result_ptr = reinterpret_cast<Array<half_t, 2> *>(&result);
Array<float, 2> const *source_ptr = reinterpret_cast<Array<float, 2> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
if (N % 2) {
result[N - 1] = convert_element_(source[N - 1]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Partial specialization for Array<half> <= Array<float>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<float, half_t, N, Round> {
using result_type = Array<float, N>;
using source_type = Array<half_t, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayConverter<float, half_t, 2, Round> convert_vector_;
NumericConverter<float, half_t, Round> convert_element_;
result_type result;
Array<float, 2> *result_ptr = reinterpret_cast<Array<float, 2> *>(&result);
Array<half_t, 2> const *source_ptr = reinterpret_cast<Array<half_t, 2> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
if (N % 2) {
result[N - 1] = convert_element_(source[N - 1]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<bfloat16_t, 2> <= Array<float, 2>, round to nearest
template <>
struct NumericArrayConverter<bfloat16_t, float, 2, FloatRoundStyle::round_to_nearest> {
using result_type = Array<bfloat16_t, 2>;
using source_type = Array<float, 2>;
static FloatRoundStyle const round_style = FloatRoundStyle::round_to_nearest;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
unsigned d;
asm("cvt.rn.bf16x2.f32 %0, %1, %2;\n" : "=r"(d) : "f"(source[1]), "f"(source[0]) );
return reinterpret_cast<result_type const &>(d);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<bfloat16_t> <= Array<float>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<bfloat16_t, float, N, Round> {
using result_type = Array<bfloat16_t, N>;
using source_type = Array<float, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayConverter<bfloat16_t, float, 2, Round> convert_vector_;
NumericConverter<bfloat16_t, float, Round> convert_element_;
result_type result;
Array<bfloat16_t, 2> *result_ptr = reinterpret_cast<Array<bfloat16_t, 2> *>(&result);
Array<float, 2> const *source_ptr = reinterpret_cast<Array<float, 2> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 2; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
if (N % 2) {
result[N - 1] = convert_element_(source[N - 1]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
#endif // if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800)
/////////////////////////////////////////////////////////////////////////////////////////////////
// Conditional guards to enable partial specialization for packed integers
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 720) && \
((__CUDACC_VER_MAJOR__ > 10) || \
((__CUDACC_VER_MAJOR__ >= 10) && (__CUDACC_VER_MINOR__ >= 2)))
/// Partial specialization for Array<int8_t, 1> <= Array<int, 1>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<int8_t, int, 1, Round> {
using result_type = Array<int8_t, 1>;
using source_type = Array<int, 1>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericConverter<int8_t, int, Round> convert_element_;
result_type result;
result[0] = convert_element_(source[0]);
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Partial specialization for Array<int8_t, 2> <= Array<int, 2>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<int8_t, int, 2, Round> {
using result_type = Array<int8_t, 2>;
using source_type = Array<int, 2>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
uint32_t tmp;
asm volatile(
"cvt.pack.sat.s8.s32.b32 %0, %2, %1, 0;\n"
: "=r"(tmp) : "r"(source[0]), "r"(source[1]));
uint16_t out = (tmp & 0xffff);
return reinterpret_cast<result_type const &>(out);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Partial specialization for Array<int8_t, 4> <= Array<int, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<int8_t, int, 4, Round> {
using result_type = Array<int8_t, 4>;
using source_type = Array<int, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
unsigned out;
asm volatile(
"{ .reg .u32 r4;"
"cvt.pack.sat.s8.s32.b32 r4, %4, %3, 0;"
"cvt.pack.sat.s8.s32.b32 %0, %2, %1, r4;"
"}"
: "=r"(out) : "r"(source[0]), "r"(source[1]), "r"(source[2]), "r"(source[3]));
return reinterpret_cast<result_type const &>(out);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Partial specialization for Array<int8_t> <= Array<int>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<int8_t, int, N, Round> {
static_assert(!(N % 4), "N must be multiple of 4.");
using result_type = Array<int8_t, N>;
using source_type = Array<int, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayConverter<int8_t, int, 4, Round> convert_vector_;
result_type result;
Array<int8_t, 4> *result_ptr = reinterpret_cast<Array<int8_t, 4> *>(&result);
Array<int, 4> const *source_ptr = reinterpret_cast<Array<int, 4> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 4; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Partial specialization for Array<uint8_t, 1> <= Array<int, 1>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<uint8_t, int, 1, Round> {
using result_type = Array<uint8_t, 1>;
using source_type = Array<int, 1>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericConverter<uint8_t, int, Round> convert_element_;
result_type result;
result[0] = convert_element_(source[0]);
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Partial specialization for Array<uint8_t, 2> <= Array<int, 2>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<uint8_t, int, 2, Round> {
using result_type = Array<uint8_t, 2>;
using source_type = Array<int, 2>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
uint32_t tmp;
asm volatile(
"cvt.pack.sat.u8.s32.b32 %0, %2, %1, 0;\n"
: "=r"(tmp) : "r"(source[0]), "r"(source[1]));
uint16_t out = (tmp & 0xffff);
return reinterpret_cast<result_type const &>(out);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Partial specialization for Array<uint8_t, 4> <= Array<int, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<uint8_t, int, 4, Round> {
using result_type = Array<uint8_t, 4>;
using source_type = Array<int, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
unsigned out;
asm volatile(
"{ .reg .u32 r4;"
"cvt.pack.sat.u8.s32.b32 r4, %4, %3, 0;"
"cvt.pack.sat.u8.s32.b32 %0, %2, %1, r4;"
"}"
: "=r"(out) : "r"(source[0]), "r"(source[1]), "r"(source[2]), "r"(source[3]));
return reinterpret_cast<result_type const &>(out);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Partial specialization for Array<uint8_t> <= Array<int>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<uint8_t, int, N, Round> {
static_assert(!(N % 4), "N must be multiple of 4.");
using result_type = Array<uint8_t, N>;
using source_type = Array<int, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayConverter<uint8_t, int, 4, Round> convert_vector_;
result_type result;
Array<uint8_t, 4> *result_ptr = reinterpret_cast<Array<uint8_t, 4> *>(&result);
Array<int, 4> const *source_ptr = reinterpret_cast<Array<int, 4> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 4; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
#endif
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for Array<float, N> <=> Array<float_e4m3_t, N>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<float, 4> <= Array<float_e4m3_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<float, float_e4m3_t, 4, Round> {
using result_element = float;
using source_element = float_e4m3_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out_fp16[2];
uint32_t const& src_packed = reinterpret_cast<uint32_t const&>(source);
asm volatile( \
"{\n" \
".reg .b16 lo, hi;\n" \
"mov.b32 {lo, hi}, %2;\n" \
"cvt.rn.f16x2.e4m3x2 %0, lo;\n" \
"cvt.rn.f16x2.e4m3x2 %1, hi;\n" \
"}\n" : "=r"(out_fp16[0]), "=r"(out_fp16[1]) : "r"(src_packed));
float2 res0 = __half22float2(reinterpret_cast<__half2 &>(out_fp16[0]));
float2 res1 = __half22float2(reinterpret_cast<__half2 &>(out_fp16[1]));
result_type out;
out[0] = res0.x;
out[1] = res0.y;
out[2] = res1.x;
out[3] = res1.y;
return out;
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Partial specialization for Array<float_e4m3_t, 4> <= Array<float, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e4m3_t, float, 4, Round> {
using result_element = float_e4m3_t;
using source_element = float;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out;
asm volatile( \
"{\n" \
".reg .b16 lo;\n" \
".reg .b16 hi;\n" \
"cvt.rn.satfinite.e4m3x2.f32 lo, %2, %1;\n" \
"cvt.rn.satfinite.e4m3x2.f32 hi, %4, %3;\n" \
"mov.b32 %0, {lo, hi};\n" \
"}" \
: "=r"(out) : "f"(source[0]), "f"(source[1]), "f"(source[2]), "f"(source[3]));
return reinterpret_cast<result_type const &>(out);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Partial specialization for Array<float, 4> <= Array<float_e5m2_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<float, float_e5m2_t, 4, Round> {
using result_element = float;
using source_element = float_e5m2_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out_fp16[2];
uint32_t const& src_packed = reinterpret_cast<uint32_t const&>(source);
asm volatile( \
"{\n" \
".reg .b16 lo, hi;\n" \
"mov.b32 {lo, hi}, %2;\n" \
"cvt.rn.f16x2.e5m2x2 %0, lo;\n" \
"cvt.rn.f16x2.e5m2x2 %1, hi;\n" \
"}\n" : "=r"(out_fp16[0]), "=r"(out_fp16[1]) : "r"(src_packed));
float2 res0 = __half22float2(reinterpret_cast<__half2 &>(out_fp16[0]));
float2 res1 = __half22float2(reinterpret_cast<__half2 &>(out_fp16[1]));
result_type out;
out[0] = res0.x;
out[1] = res0.y;
out[2] = res1.x;
out[3] = res1.y;
return out;
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Partial specialization for Array<float_e5m2_t, 4> <= Array<float, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e5m2_t, float, 4, Round> {
using result_element = float_e5m2_t;
using source_element = float;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out;
asm volatile( \
"{\n" \
".reg .b16 lo;\n" \
".reg .b16 hi;\n" \
"cvt.rn.satfinite.e5m2x2.f32 lo, %2, %1;\n" \
"cvt.rn.satfinite.e5m2x2.f32 hi, %4, %3;\n" \
"mov.b32 %0, {lo, hi};\n" \
"}" \
: "=r"(out) : "f"(source[0]), "f"(source[1]), "f"(source[2]), "f"(source[3]));
return reinterpret_cast<result_type const &>(out);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for Array<half_t, 4> <=> Array<float_e4m3_t, 4>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<half_t, 4> <= Array<float_e4m3_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<half_t, float_e4m3_t, 4, Round> {
using result_element = half_t;
using source_element = float_e4m3_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out[2];
uint32_t const& src_packed = reinterpret_cast<uint32_t const&>(source);
asm volatile( \
"{\n" \
".reg .b16 lo, hi;\n" \
"mov.b32 {lo, hi}, %2;\n" \
"cvt.rn.f16x2.e4m3x2 %0, lo;\n" \
"cvt.rn.f16x2.e4m3x2 %1, hi;\n" \
"}\n" : "=r"(out[0]), "=r"(out[1]) : "r"(src_packed));
return reinterpret_cast<result_type const &>(out);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Partial specialization for Array<float_e4m3_t, 4> <= Array<half_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e4m3_t, half_t, 4, Round> {
using result_element = float_e4m3_t;
using source_element = half_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out;
uint32_t const* src_packed = reinterpret_cast<uint32_t const*>(&source);
asm volatile( \
"{\n" \
".reg .b16 lo;\n" \
".reg .b16 hi;\n" \
"cvt.rn.satfinite.e4m3x2.f16x2 lo, %1;\n" \
"cvt.rn.satfinite.e4m3x2.f16x2 hi, %2;\n" \
"mov.b32 %0, {lo, hi};\n" \
"}" \
: "=r"(out) : "r"(src_packed[0]), "r"(src_packed[1]));
return reinterpret_cast<result_type const &>(out);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Partial specialization for Array<half_t, 4> <= Array<float_e5m2_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<half_t, float_e5m2_t, 4, Round> {
using result_element = half_t;
using source_element = float_e5m2_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out[2];
uint32_t const& src_packed = reinterpret_cast<uint32_t const&>(source);
asm volatile( \
"{\n" \
".reg .b16 lo, hi;\n" \
"mov.b32 {lo, hi}, %2;\n" \
"cvt.rn.f16x2.e5m2x2 %0, lo;\n" \
"cvt.rn.f16x2.e5m2x2 %1, hi;\n" \
"}\n" : "=r"(out[0]), "=r"(out[1]) : "r"(src_packed));
return reinterpret_cast<result_type const &>(out);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Partial specialization for Array<float_e5m2_t, 4> <= Array<half_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e5m2_t, half_t, 4, Round> {
using result_element = float_e5m2_t;
using source_element = half_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
uint32_t out;
uint32_t const* src_packed = reinterpret_cast<uint32_t const*>(&source);
asm volatile( \
"{\n" \
".reg .b16 lo;\n" \
".reg .b16 hi;\n" \
"cvt.rn.satfinite.e5m2x2.f16x2 lo, %1;\n" \
"cvt.rn.satfinite.e5m2x2.f16x2 hi, %2;\n" \
"mov.b32 %0, {lo, hi};\n" \
"}" \
: "=r"(out) : "r"(src_packed[0]), "r"(src_packed[1]));
return reinterpret_cast<result_type const &>(out);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for Array<bfloat16_t, 4> <=> Array<float_e4m3_t, 4>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<bfloat16_t, 4> <= Array<float_e4m3_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<bfloat16_t, float_e4m3_t, 4, Round> {
using result_element = bfloat16_t;
using source_element = float_e4m3_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
// Convert f8 to float
NumericArrayConverter<float, source_element, 4, Round> src2float;
Array<float, 4> tmp_floats = src2float(source);
// Convert float to bf16
result_type out;
Array<float, 2>* packed_tmp = reinterpret_cast<Array<float, 2>*>(&tmp_floats);
Array<result_element, 2>* packed_out = reinterpret_cast<Array<result_element, 2>*>(&out);
NumericArrayConverter<result_element, float, 2, Round> float2result;
packed_out[0] = float2result(packed_tmp[0]);
packed_out[1] = float2result(packed_tmp[1]);
return out;
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Partial specialization for Array<float_e4m3_t, 4> <= Array<bfloat16_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e4m3_t, bfloat16_t, 4, Round> {
using result_element = float_e4m3_t;
using source_element = bfloat16_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
// Convert bf16 to float
Array<float, 4> tmp;
Array<float, 2>* packed_tmp = reinterpret_cast<Array<float, 2>*>(&tmp);
Array<source_element, 2> const* packed_source = reinterpret_cast<Array<source_element, 2> const*>(&source);
NumericArrayConverter<float, source_element, 2, Round> src2float;
packed_tmp[0] = src2float(packed_source[0]);
packed_tmp[1] = src2float(packed_source[1]);
// Convert float to f8
NumericArrayConverter<result_element, float, 4, Round> float2result;
return float2result(tmp);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Partial specialization for Array<bfloat16_t, 4> <= Array<float_e5m2_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<bfloat16_t, float_e5m2_t, 4, Round> {
using result_element = bfloat16_t;
using source_element = float_e5m2_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
// Convert f8 to float
NumericArrayConverter<float, source_element, 4, Round> src2float;
Array<float, 4> tmp_floats = src2float(source);
// Convert float to bf16
result_type out;
Array<float, 2>* packed_tmp = reinterpret_cast<Array<float, 2>*>(&tmp_floats);
Array<result_element, 2>* packed_out = reinterpret_cast<Array<result_element, 2>*>(&out);
NumericArrayConverter<result_element, float, 2, Round> float2result;
packed_out[0] = float2result(packed_tmp[0]);
packed_out[1] = float2result(packed_tmp[1]);
return out;
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Partial specialization for Array<float_e5m2_t, 4> <= Array<bfloat16_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e5m2_t, bfloat16_t, 4, Round> {
using result_element = float_e5m2_t;
using source_element = bfloat16_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
#if defined(CUDA_PTX_FP8_CVT_ENABLED)
// Convert bf16 to float
Array<float, 4> tmp;
Array<float, 2>* packed_tmp = reinterpret_cast<Array<float, 2>*>(&tmp);
Array<source_element, 2> const* packed_source = reinterpret_cast<Array<source_element, 2> const*>(&source);
NumericArrayConverter<float, source_element, 2, Round> src2float;
packed_tmp[0] = src2float(packed_source[0]);
packed_tmp[1] = src2float(packed_source[1]);
// Convert float to f8
NumericArrayConverter<result_element, float, 4, Round> float2result;
return float2result(tmp);
#else
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
#endif
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for Array<float_e4m3_t, 4> <=> Array<float_e5m2_t, 4>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<float_e4m3_t, 4> <= Array<float_e5m2_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e4m3_t, float_e5m2_t, 4, Round> {
using result_element = float_e4m3_t;
using source_element = float_e5m2_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Partial specialization for Array<float_e5m2_t, 4> <= Array<float_e4m3_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e5m2_t, float_e4m3_t, 4, Round> {
using result_element = float_e5m2_t;
using source_element = float_e4m3_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
result_type result;
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
result[i] = converter(source[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for:
// Array<float_e4m3_t, 4> <=> Array<float_e4m3_t, 4>
// Array<float_e5m2_t, 4> <=> Array<float_e5m2_t, 4>
//
// These are needed to avoid multiple-matching-template compilation errors (e.g., when
// compiling float_e4m3_t <=> float_e4m3_t, which among T <= float_e4m3_t and float_e4m3_t <= T
// should be used?)
//
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<float_e4m3_t, 4> <= Array<float_e4m3_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e4m3_t, float_e4m3_t, 4, Round> {
using result_element = float_e4m3_t;
using source_element = float_e4m3_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return s;
}
};
/// Partial specialization for Array<float_e5m2_t, 4> <= Array<float_e5m2_t, 4>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e5m2_t, float_e5m2_t, 4, Round> {
using result_element = float_e5m2_t;
using source_element = float_e5m2_t;
using result_type = Array<result_element, 4>;
using source_type = Array<source_element, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return s;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
//
// Partial specializations for:
// Array<T, N> <=> Array<float_e4m3_t, N>
// Array<T, N> <=> Array<float_e5m2_t, N>
// using packed converter under the hood
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
typename T,
typename S,
int N,
FloatRoundStyle Round
>
struct PackedNumericArrayConverter {
using result_element = T;
using source_element = S;
using result_type = Array<result_element, N>;
using source_type = Array<source_element, N>;
static FloatRoundStyle const round_style = Round;
private:
using packed_result_type = Array<result_element, 4>;
using packed_source_type = Array<source_element, 4>;
public:
CUTLASS_DEVICE
static result_type convert(source_type const & source) {
result_type result;
packed_result_type* packed_result = reinterpret_cast<packed_result_type*>(&result);
const packed_source_type* packed_source = reinterpret_cast<const packed_source_type*>(&source);
NumericArrayConverter<result_element, source_element, 4, Round> packed_converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 4; ++i) {
packed_result[i] = packed_converter(packed_source[i]);
}
// Handle leftovers
NumericConverter<result_element, source_element, Round> converter;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N % 4; ++i) {
int idx = ((N / 4) * 4) + i;
result[idx] = converter(source[idx]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
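/// Example (illustrative usage sketch, device code): an element count that is not
/// a multiple of 4 is handled by converting groups of four through the packed path
/// and the remaining elements one at a time.
///
///   Array<float_e4m3_t, 6> src;                           // ... populated elsewhere
///
///   NumericArrayConverter<half_t, float_e4m3_t, 6> convert6;
///   Array<half_t, 6> dst = convert6(src);                 // 4 packed + 2 leftover lanes
///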
/// Partial specialization for Array<T, N> <= Array<float_e4m3_t, N>
template <
typename T,
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<T, float_e4m3_t, N, Round> :
public PackedNumericArrayConverter<T, float_e4m3_t, N, Round> {};
/// Partial specialization for Array<T, N> <= Array<float_e5m2_t, N>
template <
typename T,
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<T, float_e5m2_t, N, Round> :
public PackedNumericArrayConverter<T, float_e5m2_t, N, Round> {};
/// Partial specialization for Array<float_e4m3_t, N> <= Array<S, N>
template <
typename S,
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e4m3_t, S, N, Round> :
public PackedNumericArrayConverter<float_e4m3_t, S, N, Round> {};
/// Partial specialization for Array<float_e5m2_t, N> <= Array<S, N>
template <
typename S,
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e5m2_t, S, N, Round> :
public PackedNumericArrayConverter<float_e5m2_t, S, N, Round> {};
/// Partial specialization for Array<float_e4m3_t, N> <= Array<float_e5m2_t, N>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e4m3_t, float_e5m2_t, N, Round> :
public PackedNumericArrayConverter<float_e4m3_t, float_e5m2_t, N, Round> {};
/// Partial specialization for Array<float_e5m2_t, N> <= Array<float_e4m3_t, N>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e5m2_t, float_e4m3_t, N, Round> :
public PackedNumericArrayConverter<float_e5m2_t, float_e4m3_t, N, Round> {};
/// Partial specialization for Array<float_e4m3_t, N> <= Array<float_e4m3_t, N>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e4m3_t, float_e4m3_t, N, Round> :
public PackedNumericArrayConverter<float_e4m3_t, float_e4m3_t, N, Round> {};
/// Partial specialization for Array<float_e5m2_t, N> <= Array<float_e5m2_t, N>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<float_e5m2_t, float_e5m2_t, N, Round> :
public PackedNumericArrayConverter<float_e5m2_t, float_e5m2_t, N, Round> {};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for Array<int8_t> <= Array<float>
/// Conversion is performed with saturation regardless of the setting of
/// the `Round` template parameter.
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<int8_t, float, N, Round> {
using result_type = Array<int8_t, N>;
using source_type = Array<float, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
// Convert float to int
Array<int32_t, N> temporary;
NumericArrayConverter<int, float, N, Round> compute_converter;
temporary = compute_converter(source);
    // Convert int to int8_t
NumericArrayConverter<int8_t, int32_t, N, Round> destination_converter;
return destination_converter(temporary);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 750) && \
((__CUDACC_VER_MAJOR__ > 10) || \
((__CUDACC_VER_MAJOR__ >= 10) && (__CUDACC_VER_MINOR__ >= 2)))
/// Partial specialization for Array<int4b_t, 8> <= Array<int, 8>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<int4b_t, int, 8, Round> {
using result_type = Array<int4b_t, 8>;
using source_type = Array<int, 8>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
unsigned out;
asm volatile(
"{ .reg .u32 r4;"
"cvt.pack.sat.s4.s32.b32 r4, %8, %7, 0;"
"cvt.pack.sat.s4.s32.b32 r4, %6, %5, r4;"
"cvt.pack.sat.s4.s32.b32 r4, %4, %3, r4;"
"cvt.pack.sat.s4.s32.b32 %0, %2, %1, r4;"
"}"
: "=r"(out)
: "r"(source[0]), "r"(source[1]), "r"(source[2]), "r"(source[3]),
"r"(source[4]), "r"(source[5]), "r"(source[6]), "r"(source[7]));
return reinterpret_cast<result_type const &>(out);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Partial specialization for Array<int4b_t> <= Array<int>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<int4b_t, int, N, Round> {
static_assert(!(N % 8), "N must be multiple of 8.");
using result_type = Array<int4b_t, N>;
using source_type = Array<int, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayConverter<int4b_t, int, 8, Round> convert_vector_;
result_type result;
Array<int4b_t, 8> *result_ptr = reinterpret_cast<Array<int4b_t, 8> *>(&result);
Array<int, 8> const *source_ptr = reinterpret_cast<Array<int, 8> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 8; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Partial specialization for Array<uint4b_t, 8> <= Array<int, 8>
template <
FloatRoundStyle Round
>
struct NumericArrayConverter<uint4b_t, int, 8, Round> {
using result_type = Array<uint4b_t, 8>;
using source_type = Array<int, 8>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
unsigned out;
asm volatile(
"{ .reg .u32 r4;"
"cvt.pack.sat.u4.s32.b32 r4, %8, %7, 0;"
"cvt.pack.sat.u4.s32.b32 r4, %6, %5, r4;"
"cvt.pack.sat.u4.s32.b32 r4, %4, %3, r4;"
"cvt.pack.sat.u4.s32.b32 %0, %2, %1, r4;"
"}"
: "=r"(out)
: "r"(source[0]), "r"(source[1]), "r"(source[2]), "r"(source[3]),
"r"(source[4]), "r"(source[5]), "r"(source[6]), "r"(source[7]));
return reinterpret_cast<result_type const &>(out);
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
/// Partial specialization for Array<uint4b_t> <= Array<int>
template <
int N,
FloatRoundStyle Round
>
struct NumericArrayConverter<uint4b_t, int, N, Round> {
static_assert(!(N % 8), "N must be multiple of 8.");
using result_type = Array<uint4b_t, N>;
using source_type = Array<int, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_HOST_DEVICE
static result_type convert(source_type const & source) {
NumericArrayConverter<uint4b_t, int, 8, Round> convert_vector_;
result_type result;
Array<uint4b_t, 8> *result_ptr = reinterpret_cast<Array<uint4b_t, 8> *>(&result);
Array<int, 8> const *source_ptr = reinterpret_cast<Array<int, 8> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 8; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
return result;
}
CUTLASS_HOST_DEVICE
result_type operator()(source_type const &s) {
return convert(s);
}
};
#endif // Conditional guards to enable partial specialization for packed integers
/////////////////////////////////////////////////////////////////////////////////////////////////
/// FastNumericArrayConverter only works when the source values lie within the center of the representable range.
/// Conversion operator for Array. See the comments before
/// FastLinearCombinationClamp.
template <typename T, typename S, int N,
FloatRoundStyle Round = FloatRoundStyle::round_to_nearest>
struct FastNumericArrayConverter {
using result_type = Array<T, N>;
using source_type = Array<S, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const &s) {
result_type result;
NumericArrayConverter<T, S, N, Round> convert_;
return convert_(s);
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) { return convert(s); }
};
/// Partial specialization for Array<float> <= Array<int>
template <typename T, int N, FloatRoundStyle Round>
struct FastNumericArrayConverter<float, T, N, Round> {
using result_type = Array<float, N>;
using source_type = Array<T, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
result_type result;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
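      // Bit trick: 0x4B400000 is the encoding of 12582912.0f (1.5 * 2^23), whose
      // ULP is 1.0. For integer sources within roughly +/-2^22, adding the value
      // to this bit pattern yields the float 12582912.0f + value, so subtracting
      // 12582912.0f recovers the source as a float without an int-to-float
      // conversion instruction.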
int tmp = source[i] + 1262485504 /*0x4B400000*/;
result[i] = reinterpret_cast<float const &>(tmp) - 12582912.0f;
}
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) { return convert(s); }
};
/// Partial specialization for Array<int8_t, 4> <= Array<float, 4>
template <FloatRoundStyle Round>
struct FastNumericArrayConverter<int8_t, float, 4, Round> {
using result_type = Array<int8_t, 4>;
using source_type = Array<float, 4>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
Array<int32_t, 4> result;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < 4; ++i) {
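      // Adding 12582912.0f (1.5 * 2^23, ULP of 1.0) rounds the source to an
      // integer whose low bits appear directly in the low bytes of the float
      // encoding, so reinterpreting the sum as int32_t exposes the rounded value.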
float tmp = source[i] + 12582912.0f;
result[i] = reinterpret_cast<int32_t const &>(tmp);
}
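    // Gather the low byte of each lane: the first two permutes pair lanes (0,1)
    // and (2,3), and the final permute merges them into four packed int8 values.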
result[0] = __byte_perm(result[0], result[1], 0x40);
result[2] = __byte_perm(result[2], result[3], 0x40);
result[0] = __byte_perm(result[0], result[2], 0x5410);
return reinterpret_cast<result_type const &>(result[0]);
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) { return convert(s); }
};
/// Partial specialization for Array<int8_t> <= Array<float>
template <int N, FloatRoundStyle Round>
struct FastNumericArrayConverter<int8_t, float, N, Round> {
static_assert(!(N % 4), "N must be multiple of 4.");
using result_type = Array<int8_t, N>;
using source_type = Array<float, N>;
static FloatRoundStyle const round_style = Round;
CUTLASS_DEVICE
static result_type convert(source_type const &source) {
FastNumericArrayConverter<int8_t, float, 4, Round> convert_vector_;
result_type result;
Array<int8_t, 4> *result_ptr =
reinterpret_cast<Array<int8_t, 4> *>(&result);
Array<float, 4> const *source_ptr =
reinterpret_cast<Array<float, 4> const *>(&source);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N / 4; ++i) {
result_ptr[i] = convert_vector_(source_ptr[i]);
}
return result;
}
CUTLASS_DEVICE
result_type operator()(source_type const &s) { return convert(s); }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Defines preferred rounding mode for a pair of types
template <typename T, typename S>
struct PreferredRoundingMode {
static FloatRoundStyle const kRound = FloatRoundStyle::round_to_nearest;
};
/// Defines preferred rounding mode for a pair of types
template <>
struct PreferredRoundingMode<tfloat32_t, float> {
static FloatRoundStyle const kRound = FloatRoundStyle::round_half_ulp_truncate;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Packs predicates into an array.
template <int N>
struct PackPredicates {
using result_type = Array<uint1b_t, N>;
static_assert(!(N % 4), "Must pack predicates in a count that is a multiple of 4");
CUTLASS_HOST_DEVICE
result_type operator()(bool const predicates[]) {
result_type packed;
packed.clear();
int const kWordSize = 8;
uint8_t *bytes = reinterpret_cast<uint8_t *>(packed.data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
int word_idx = (i / kWordSize);
int bit_idx = (i % kWordSize);
uint8_t mask = ((predicates[i] ? 1u : 0u) << bit_idx);
bytes[word_idx] = (bytes[word_idx] | mask);
}
return packed;
}
};
/// Unpacks predicates from an array.
template <int N>
struct UnpackPredicates {
using result_type = Array<uint1b_t, N>;
static_assert(!(N % 4), "Must unpack predicates in a count that is a multiple of 4");
CUTLASS_HOST_DEVICE
void operator()(bool predicates[], result_type const &packed) {
int const kWordSize = 8;
uint8_t const *bytes = reinterpret_cast<uint8_t const *>(packed.data());
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < N; ++i) {
int word_idx = (i / kWordSize);
int bit_idx = (i % kWordSize);
predicates[i] = bool((bytes[word_idx] >> bit_idx) & 0x1);
}
}
};
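/// Example (illustrative usage sketch): round-tripping predicates through the
/// packed representation.
///
///   bool preds[4] = {true, false, true, true};
///
///   Array<uint1b_t, 4> packed = PackPredicates<4>()(preds);
///
///   bool unpacked[4];
///   UnpackPredicates<4>()(unpacked, packed);   // unpacked[i] == preds[i]
///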
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 71,278 | C | 27.718372 | 111 | 0.617848 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/subbyte_reference.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Provides a mechanism for packing and unpacking elements smaller than one byte
*/
#pragma once
#include "cutlass/numeric_types.h"
namespace cutlass {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This class provides a mechanism for packing and unpacking elements smaller than one byte. It
/// assumes these sub-byte elements are packed in a traditional C++ numeric type.
///
/// The intended application is to provide a mechanism to indirectly reference elements in
/// memory or Array<> objects whose addresses cannot otherwise be taken since they are smaller
/// than one byte.
///
/// Supports basic pointer arithmetic:
///
/// Example:
///
/// int4b_t *ptr = ...;
///
/// SubbyteReference<int4b_t> ref = ptr;
/// ref += 15;
///
/// int4b_t x = ref; // load an int4b_t
/// ref = x + 2_s4; // perform arithmetic on int4b_t and then store
///
template <
typename Element_, /// CUTLASS numeric element type.
typename Storage_ = uint8_t /// Underlying storage type. Must be able to hold an integer
/// number of objects of type Element.
>
class ConstSubbyteReference {
public:
using Element = Element_;
using Storage = Storage_;
using StoragePointer = Storage const *;
static_assert(sizeof_bits<Element>::value <= sizeof_bits<Storage>::value,
"Size of Element must not be greater than Storage.");
static_assert(!(sizeof_bits<Storage>::value % sizeof_bits<Element>::value),
"Storage must be divisible by Element");
private:
///! Number of elements per storage vector
int const kElementsPerVector = sizeof_bits<Storage>::value / sizeof_bits<Element>::value;
///! Bit mask
Storage const kMask =
((sizeof_bits<Element>::value < sizeof_bits<Storage>::value) ?
(Storage(1) << sizeof_bits<Element>::value) - Storage(1) :
~Storage(0));
private:
/// Pointer to array containing element
StoragePointer ptr_;
/// Offset (in units of elements) from pointer.
///
/// Invariant: must always be in range [0, kElementsPerVector)
int offset_;
public:
CUTLASS_HOST_DEVICE
ConstSubbyteReference(): ptr_(nullptr), offset_(0) { }
/// Constructor
CUTLASS_HOST_DEVICE
ConstSubbyteReference(
Element const *ptr, /// pointer to memory
int64_t offset /// logical offset in units of Element
):
ptr_(reinterpret_cast<StoragePointer>(ptr)),
offset_(0) {
int64_t offset_in_vectors = offset / kElementsPerVector;
int64_t offset_in_elements = offset % kElementsPerVector;
ptr_ += offset_in_vectors;
offset_ = int(offset_in_elements);
}
/// Constructor
CUTLASS_HOST_DEVICE
ConstSubbyteReference(
Element *ptr = nullptr
): ConstSubbyteReference(ptr, 0) { }
/// Gets storage pointer
CUTLASS_HOST_DEVICE
StoragePointer storage_pointer() const {
return ptr_;
}
/// Gets element offset within storage vector
CUTLASS_HOST_DEVICE
int element_offset() const {
return offset_;
}
/// Unpacks an element from memory
CUTLASS_HOST_DEVICE
Element get() const {
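    // Shift the containing storage word right so that the referenced element
    // occupies the low bits, then mask off the neighboring elements.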
Storage item = Storage((*ptr_ >> (offset_ * sizeof_bits<Element>::value)) & kMask);
return reinterpret_cast<Element const &>(item);
}
/// Unpacks an element from memory
CUTLASS_HOST_DEVICE
operator Element() const {
return get();
}
/// Adds an offset in units of elements to the reference
CUTLASS_HOST_DEVICE
ConstSubbyteReference &operator+=(int offset) {
offset += offset_;
int offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = offset % kElementsPerVector;
ptr_ += offset_in_vectors;
offset_ = offset_in_elements;
return *this;
}
/// Adds an offset in units of elements to the reference
CUTLASS_HOST_DEVICE
ConstSubbyteReference &operator+=(long long offset) {
offset += offset_;
long long offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = int(offset % kElementsPerVector);
ptr_ += offset_in_vectors;
offset_ = offset_in_elements;
return *this;
}
  /// Subtracts an offset in units of elements from the reference
CUTLASS_HOST_DEVICE
ConstSubbyteReference &operator-=(int offset) {
int offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = offset % kElementsPerVector;
ptr_ -= offset_in_vectors;
offset_ -= offset_in_elements;
if (offset_ < 0) {
offset_ += kElementsPerVector;
--ptr_;
}
return *this;
}
  /// Subtracts an offset in units of elements from the reference
CUTLASS_HOST_DEVICE
ConstSubbyteReference &operator-=(long long offset) {
long long offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = int(offset % kElementsPerVector);
ptr_ -= offset_in_vectors;
offset_ -= offset_in_elements;
if (offset_ < 0) {
offset_ += kElementsPerVector;
--ptr_;
}
return *this;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
ConstSubbyteReference operator+(int offset) const {
ConstSubbyteReference ref(ptr_, offset_);
ref += offset;
return ref;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
ConstSubbyteReference operator+(long long offset) const {
ConstSubbyteReference ref(ptr_, offset_);
ref += offset;
return ref;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
ConstSubbyteReference operator-(int offset) const {
ConstSubbyteReference ref(ptr_, offset_);
ref -= offset;
return ref;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
  ConstSubbyteReference operator-(long long offset) const {
ConstSubbyteReference ref(ptr_, offset_);
ref -= offset;
return ref;
}
/// Computes the difference in elements between references
CUTLASS_HOST_DEVICE
ptrdiff_t operator-(ConstSubbyteReference ref) const {
return (ptr_ - ref.ptr_) * kElementsPerVector + (offset_ - ref.offset_);
}
/// Explicit cast to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
return int(get());
}
/// Explicit cast to signed 64-bit integer
CUTLASS_HOST_DEVICE
explicit operator int64_t() const {
return int64_t(get());
}
/// Explicit cast to unsigned 64-bit integer
CUTLASS_HOST_DEVICE
explicit operator uint64_t() const {
return uint64_t(get());
}
/// Explicit cast to float
CUTLASS_HOST_DEVICE
explicit operator float() const {
return float(get());
}
/// Explicit cast to double
CUTLASS_HOST_DEVICE
explicit operator double() const {
return double(get());
}
};
template <
typename Element_, /// CUTLASS numeric element type.
typename Storage_ = uint8_t /// Underlying storage type. Must be able to hold an integer
/// number of objects of type Element.
>
class SubbyteReference {
public:
using Element = Element_;
using Storage = Storage_;
using StoragePointer = Storage *;
static_assert(sizeof_bits<Element>::value <= sizeof_bits<Storage>::value,
"Size of Element must not be greater than Storage.");
static_assert(!(sizeof_bits<Storage>::value % sizeof_bits<Element>::value),
"Storage must be divisible by Element");
private:
///! Number of elements per storage vector
int const kElementsPerVector = sizeof_bits<Storage>::value / sizeof_bits<Element>::value;
///! Bit mask
Storage const kMask =
((sizeof_bits<Element>::value < sizeof_bits<Storage>::value) ?
(Storage(1) << sizeof_bits<Element>::value) - Storage(1) :
~Storage(0));
private:
/// Pointer to array containing element
StoragePointer ptr_;
/// Offset (in units of elements) from pointer.
///
/// Invariant: must always be in range [0, kElementsPerVector)
int offset_;
public:
CUTLASS_HOST_DEVICE
SubbyteReference(): ptr_(nullptr), offset_(0) { }
/// Constructor
CUTLASS_HOST_DEVICE
SubbyteReference(
Element *ptr, /// pointer to memory
int64_t offset /// logical offset in units of Element
):
ptr_(reinterpret_cast<StoragePointer>(ptr)),
offset_(0) {
int64_t offset_in_vectors = offset / kElementsPerVector;
int64_t offset_in_elements = offset % kElementsPerVector;
ptr_ += offset_in_vectors;
offset_ = int(offset_in_elements);
}
/// Constructor
CUTLASS_HOST_DEVICE
SubbyteReference(
Element *ptr = nullptr
): SubbyteReference(ptr, 0) { }
/// Gets storage pointer
CUTLASS_HOST_DEVICE
StoragePointer storage_pointer() const {
return ptr_;
}
  /// Gets the address reinterpreted as an Element pointer
CUTLASS_HOST_DEVICE
Element * operator&() const {
return reinterpret_cast<Element *>(ptr_);
}
/// Gets element offset within storage vector
CUTLASS_HOST_DEVICE
int element_offset() const {
return offset_;
}
/// Unpacks an element from memory
CUTLASS_HOST_DEVICE
Element get() const {
Storage item = Storage((*ptr_ >> (offset_ * sizeof_bits<Element>::value)) & kMask);
return reinterpret_cast<Element const &>(item);
}
/// Stores an element to memory
CUTLASS_HOST_DEVICE
SubbyteReference & set(Element const &x) {
Storage item = (reinterpret_cast<Storage const &>(x) & kMask);
Storage kUpdateMask = Storage(~(kMask << (offset_ * sizeof_bits<Element>::value)));
*ptr_ = Storage((*ptr_ & kUpdateMask) | Storage(item << (offset_ * sizeof_bits<Element>::value)));
return *this;
}
/// Unpacks an element from memory
CUTLASS_HOST_DEVICE
operator Element() const {
return get();
}
/// Stores an element to memory
CUTLASS_HOST_DEVICE
SubbyteReference &operator=(Element const & x) {
return set(x);
}
/// Stores an element to memory
CUTLASS_HOST_DEVICE
SubbyteReference &operator=(SubbyteReference const & x) {
return set(x.get());
}
/// Stores an element to memory
CUTLASS_HOST_DEVICE
SubbyteReference &operator=(
ConstSubbyteReference<Element, Storage> const &x) {
return set(x.get());
}
/// Adds an offset in units of elements to the reference
CUTLASS_HOST_DEVICE
SubbyteReference &operator+=(int offset) {
offset += offset_;
int offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = offset % kElementsPerVector;
ptr_ += offset_in_vectors;
offset_ = offset_in_elements;
return *this;
}
/// Adds an offset in units of elements to the reference
CUTLASS_HOST_DEVICE
SubbyteReference &operator+=(long long offset) {
offset += offset_;
long long offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = int(offset % kElementsPerVector);
ptr_ += offset_in_vectors;
offset_ = offset_in_elements;
return *this;
}
  /// Subtracts an offset in units of elements from the reference
CUTLASS_HOST_DEVICE
SubbyteReference &operator-=(int offset) {
int offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = offset % kElementsPerVector;
ptr_ -= offset_in_vectors;
offset_ -= offset_in_elements;
if (offset_ < 0) {
offset_ += kElementsPerVector;
--ptr_;
}
return *this;
}
  /// Subtracts an offset in units of elements from the reference
CUTLASS_HOST_DEVICE
SubbyteReference &operator-=(long long offset) {
long long offset_in_vectors = offset / kElementsPerVector;
int offset_in_elements = int(offset % kElementsPerVector);
ptr_ -= offset_in_vectors;
offset_ -= offset_in_elements;
if (offset_ < 0) {
offset_ += kElementsPerVector;
--ptr_;
}
return *this;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
SubbyteReference operator+(int offset) const {
SubbyteReference ref(ptr_, offset_);
ref += offset;
return ref;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
SubbyteReference operator+(long long offset) const {
SubbyteReference ref(ptr_, offset_);
ref += offset;
return ref;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
SubbyteReference operator-(int offset) const {
SubbyteReference ref(ptr_, offset_);
ref -= offset;
return ref;
}
/// Returns a reference to an element with a given offset from the current reference
CUTLASS_HOST_DEVICE
  SubbyteReference operator-(long long offset) const {
SubbyteReference ref(ptr_, offset_);
ref -= offset;
return ref;
}
/// Computes the difference in elements between references
CUTLASS_HOST_DEVICE
ptrdiff_t operator-(SubbyteReference ref) const {
return (ptr_ - ref.ptr_) * kElementsPerVector + (offset_ - ref.offset_);
}
/// Explicit cast to int
CUTLASS_HOST_DEVICE
explicit operator int() const {
return int(get());
}
/// Explicit cast to signed 64-bit integer
CUTLASS_HOST_DEVICE
explicit operator int64_t() const {
return int64_t(get());
}
/// Explicit cast to unsigned 64-bit integer
CUTLASS_HOST_DEVICE
explicit operator uint64_t() const {
return uint64_t(get());
}
/// Explicit cast to float
CUTLASS_HOST_DEVICE
explicit operator float() const {
return float(get());
}
/// Explicit cast to double
CUTLASS_HOST_DEVICE
explicit operator double() const {
return double(get());
}
};
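// Usage sketch (illustrative, not part of the original header; the storage buffer
// and values below are hypothetical). SubbyteReference performs a read-modify-write
// of the Storage word holding the addressed sub-byte element, while
// ConstSubbyteReference above provides the read-only counterpart.
//
//   uint8_t storage[8] = {0};                                   // 16 packed int4b_t values
//   cutlass::int4b_t *ptr = reinterpret_cast<cutlass::int4b_t *>(storage);
//
//   cutlass::SubbyteReference<cutlass::int4b_t> ref(ptr, 5);    // logical element 5
//   ref = cutlass::int4b_t(3);                                  // packs one nibble in place
//   int x = int(ref);                                           // unpacks it again (x == 3)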
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Element, bool subbyte = (sizeof_bits<Element>::value < 8)>
struct ReferenceFactory;
template <typename Element>
struct ReferenceFactory<Element, false> {
CUTLASS_HOST_DEVICE
static Element &get(Element *ptr, int64_t offset) {
return ptr[offset];
}
CUTLASS_HOST_DEVICE
static Element const &get(Element const *ptr, int64_t offset) {
return ptr[offset];
}
};
template <typename Element>
struct ReferenceFactory<Element, true> {
CUTLASS_HOST_DEVICE
static SubbyteReference<Element> get(Element *ptr, int64_t offset) {
return SubbyteReference<Element>(ptr, offset);
}
CUTLASS_HOST_DEVICE
static ConstSubbyteReference<Element> get(Element const *ptr,
int64_t offset) {
return ConstSubbyteReference<Element>(ptr, offset);
}
};
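// Dispatch sketch (illustrative, not part of the original header; the buffers and
// index below are hypothetical). Generic code can use ReferenceFactory to obtain
// either a plain reference or a (Const)SubbyteReference without knowing whether
// Element is a sub-byte type.
//
//   float *fp32_ptr = /* ... */;
//   cutlass::int4b_t *s4_ptr = /* ... */;
//   int64_t i = /* ... */;
//
//   float &a = cutlass::ReferenceFactory<float>::get(fp32_ptr, i);           // plain reference
//   auto  b  = cutlass::ReferenceFactory<cutlass::int4b_t>::get(s4_ptr, i);  // SubbyteReference<int4b_t>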
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace cutlass
| 16,587 | C | 26.600666 | 102 | 0.657563 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/gemm.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines common types used for all GEMM-like operators.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/coord.h"
namespace cutlass {
namespace gemm {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// GEMM operand enumeration: D = A * B + C
enum class Operand {
kA, /// A multiplicand
kB, /// B multiplicand
kC, /// Source accumulator
kD /// Destination accumulator
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Shape of a matrix multiply-add operation
template <
/// Rows of matrix product
int M = 1,
/// Columns of matrix product
int N = 1,
/// Inner dimension of matrix product
int K = 1
>
struct GemmShape {
static int const kM = M;
static int const kN = N;
static int const kK = K;
static int const kMN = M * N;
static int const kMK = M * K;
static int const kKN = N * K;
static int const kMNK = M * N * K;
static int const kCount = kMNK;
//
// Static member functions
//
/// Returns a Coord object
CUTLASS_HOST_DEVICE
static Coord<3> toCoord() {
return make_Coord(kM, kN, kK);
}
};
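// Example (illustrative, not part of the original header): a 128x128x32
// threadblock tile expressed as a GemmShape.
//
//   using TileShape = cutlass::gemm::GemmShape<128, 128, 32>;
//   static_assert(TileShape::kMN == 128 * 128, "MN extent");
//   cutlass::Coord<3> extent = TileShape::toCoord();   // (128, 128, 32)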
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Type alias of the transpose of a GemmShape
template <
/// concept: GemmShape
typename Shape
>
using GemmShapeTranspose = GemmShape<Shape::kN, Shape::kM, Shape::kK>;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// GemmCoord is a structure derived from Coord<3> that specifies a location within the
/// coordinate space of a GEMM problem.
struct GemmCoord : public Coord<3, int> {
/// Integer-valued index
typedef int Index;
/// Base type is a Coord of rank=3
typedef Coord<3, Index> Base;
/// GEMM M dimension - rows of the output C matrix
static int const kM = 0;
/// GEMM N dimension - columns of the output C matrix
static int const kN = 1;
/// GEMM K dimension - inner dimension of the GEMM problem
static int const kK = 2;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
GemmCoord() { }
  /// Constructs from Coord<3>
CUTLASS_HOST_DEVICE
GemmCoord(Coord<3, Index> const &coord): Base(make_Coord(coord[0], coord[1], coord[2])) { }
  /// Helper to construct from M, N, and K variables
CUTLASS_HOST_DEVICE
GemmCoord(Index m, Index n, Index k): Base(make_Coord(m, n, k)) { }
/// Returns the GEMM M coordinate
CUTLASS_HOST_DEVICE
Index const & m() const { return this->at(kM); }
/// Returns reference to the GEMM M coordinate
CUTLASS_HOST_DEVICE
Index & m() { return this->at(kM); }
/// Returns the GEMM N coordinate
CUTLASS_HOST_DEVICE
Index const & n() const { return this->at(kN); }
/// Returns reference to the GEMM N coordinate
CUTLASS_HOST_DEVICE
Index & n() { return this->at(kN); }
/// Returns the GEMM K coordinate
CUTLASS_HOST_DEVICE
Index const & k() const { return this->at(kK); }
/// Returns reference to the GEMM K coordinate
CUTLASS_HOST_DEVICE
Index & k() { return this->at(kK); }
/// Obtains a Coord<3> from GemmCoord
CUTLASS_HOST_DEVICE
Coord<3> mnk() const {
return make_Coord(m(), n(), k());
}
/// Obtains a Coord<3> from GemmCoord
CUTLASS_HOST_DEVICE
Coord<3> knm() const {
return make_Coord(k(), n(), m());
}
/// Obtains a Coord<2> from GemmCoord
CUTLASS_HOST_DEVICE
Coord<2> nm() const {
return make_Coord(n(), m());
}
/// Obtains a Coord<2> from GemmCoord
CUTLASS_HOST_DEVICE
Coord<2> mn() const {
return make_Coord(m(), n());
}
/// Obtains a Coord<2> from GemmCoord
CUTLASS_HOST_DEVICE
Coord<2> mk() const {
return make_Coord(m(), k());
}
/// Obtains a Coord<2> from GemmCoord
CUTLASS_HOST_DEVICE
Coord<2> km() const {
return make_Coord(k(), m());
}
/// Obtains a Coord<2> from GemmCoord
CUTLASS_HOST_DEVICE
Coord<2> nk() const {
return make_Coord(n(), k());
}
/// Obtains a Coord<2> from GemmCoord
CUTLASS_HOST_DEVICE
Coord<2> kn() const {
return make_Coord(k(), n());
}
//
// Coord operators
//
/// Element-wise addition
CUTLASS_HOST_DEVICE
GemmCoord operator+(Base const& b) const {
return GemmCoord(Base::operator+(b));
}
/// Element-wise subtraction
CUTLASS_HOST_DEVICE
GemmCoord operator-(Base const& b) const {
return GemmCoord(Base::operator-(b));
}
/// Element-wise multiplication
CUTLASS_HOST_DEVICE
GemmCoord operator*(Base const& b) const {
return GemmCoord(Base::operator*(b));
}
/// Element-wise division
CUTLASS_HOST_DEVICE
GemmCoord operator/(Base const& b) const {
return GemmCoord(Base::operator/(b));
}
/// In-place addition
CUTLASS_HOST_DEVICE
GemmCoord& operator+=(Base const& b) {
Base::operator+=(b);
return *this;
}
/// In-place subtraction
CUTLASS_HOST_DEVICE
GemmCoord& operator-=(Base const& b) {
Base::operator-=(b);
return *this;
}
/// In-place multiplication
CUTLASS_HOST_DEVICE
GemmCoord& operator*=(Base const& b) {
Base::operator*=(b);
return *this;
}
/// In-place division
CUTLASS_HOST_DEVICE
GemmCoord& operator/=(Base const& b) {
Base::operator/=(b);
return *this;
}
};
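// Example (illustrative, not part of the original header): GemmCoord stores
// (M, N, K) with named accessors and 2-D projections, and supports element-wise
// arithmetic through its Coord<3> base.
//
//   cutlass::gemm::GemmCoord problem(1024, 512, 256);
//   int m = problem.m();                                   // 1024
//   cutlass::Coord<2> mk = problem.mk();                   // (1024, 256)
//   cutlass::gemm::GemmCoord doubled = problem + problem;  // (2048, 1024, 512)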
////////////////////////////////////////////////////////////////////////////////////////////////////
/// BatchedGemmCoord is a structure derived from Coord<4> that specifies a location within the
/// coordinate space of a batched GEMM problem.
struct BatchedGemmCoord : public Coord<4, int> {
/// Integer-valued index
typedef int Index;
/// Base type is a Coord of rank=4
typedef Coord<4, Index> Base;
/// GEMM M dimension - rows of the output C matrix
static int const kM = 0;
/// GEMM N dimension - columns of the output C matrix
static int const kN = 1;
/// GEMM K dimension - inner dimension of the GEMM problem
static int const kK = 2;
  /// GEMM Batch dimension - number of GEMM problems in the batch
static int const kBatch = 3;
//
// Methods
//
/// Default ctor
CUTLASS_HOST_DEVICE
BatchedGemmCoord() { }
/// Constructs from Coord<4>
CUTLASS_HOST_DEVICE
BatchedGemmCoord(Base const &coord): Base(coord) { }
  /// Helper to construct from M, N, K, and batch variables
CUTLASS_HOST_DEVICE
BatchedGemmCoord(Index m, Index n, Index k, Index b): Base(make_Coord(m, n, k, b)) { }
/// Returns the GEMM M coordinate
CUTLASS_HOST_DEVICE
Index const & m() const { return this->at(kM); }
/// Returns reference to the GEMM M coordinate
CUTLASS_HOST_DEVICE
Index & m() { return this->at(kM); }
/// Returns the GEMM N coordinate
CUTLASS_HOST_DEVICE
Index const & n() const { return this->at(kN); }
/// Returns reference to the GEMM N coordinate
CUTLASS_HOST_DEVICE
Index & n() { return this->at(kN); }
/// Returns the GEMM K coordinate
CUTLASS_HOST_DEVICE
Index const & k() const { return this->at(kK); }
/// Returns reference to the GEMM K coordinate
CUTLASS_HOST_DEVICE
Index & k() { return this->at(kK); }
/// Returns the GEMM batch coordinate
CUTLASS_HOST_DEVICE
Index const & batch() const { return this->at(kBatch); }
/// Returns reference to the GEMM batch coordinate
CUTLASS_HOST_DEVICE
Index & batch() { return this->at(kBatch); }
/// Obtains a GemmCoord from BatchedGemmCoord
CUTLASS_HOST_DEVICE
GemmCoord mnk() const {
return GemmCoord(m(), n(), k());
}
/// Obtains a Coord<4> from BatchedGemmCoord
CUTLASS_HOST_DEVICE
Coord<4> mnkb() const {
return make_Coord(m(), n(), k(), batch());
}
//
// Coord operators
//
/// Element-wise addition
CUTLASS_HOST_DEVICE
BatchedGemmCoord operator+(Base const& b) const {
return BatchedGemmCoord(Base::operator+(b));
}
/// Element-wise subtraction
CUTLASS_HOST_DEVICE
BatchedGemmCoord operator-(Base const& b) const {
return BatchedGemmCoord(Base::operator-(b));
}
/// Element-wise multiplication
CUTLASS_HOST_DEVICE
BatchedGemmCoord operator*(Base const& b) const {
return BatchedGemmCoord(Base::operator*(b));
}
/// Element-wise division
CUTLASS_HOST_DEVICE
BatchedGemmCoord operator/(Base const& b) const {
return BatchedGemmCoord(Base::operator/(b));
}
/// In-place addition
CUTLASS_HOST_DEVICE
BatchedGemmCoord& operator+=(Base const& b) {
Base::operator+=(b);
return *this;
}
/// In-place subtraction
CUTLASS_HOST_DEVICE
BatchedGemmCoord& operator-=(Base const& b) {
Base::operator-=(b);
return *this;
}
/// In-place multiplication
CUTLASS_HOST_DEVICE
BatchedGemmCoord& operator*=(Base const& b) {
Base::operator*=(b);
return *this;
}
/// In-place division
CUTLASS_HOST_DEVICE
BatchedGemmCoord& operator/=(Base const& b) {
Base::operator/=(b);
return *this;
}
};
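// Example (illustrative, not part of the original header): a batch of 8 GEMMs,
// each of size 256x128x64.
//
//   cutlass::gemm::BatchedGemmCoord coord(256, 128, 64, 8);
//   cutlass::gemm::GemmCoord single = coord.mnk();     // drops the batch index
//   int batch_count = coord.batch();                   // 8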
/////////////////////////////////////////////////////////////////////////////////////////////////
enum class GemmUniversalMode {
kGemm,
kGemmSplitKParallel,
kBatched,
kArray,
kInvalid
};
////////////////////////////////////////////////////////////////////////////////
/// Some options for clearing shared memory
enum class SharedMemoryClearOption {
kNone, ///< SMEM is in don't-care state
kZfill, ///< Kernels fill out of bounds accesses with zeros
kClearLastStage ///< Last SMEM stage is explicitly cleared. Mainloop uses 'kNone'
};
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////////////////////////
| 11,570 | C | 26.098361 | 100 | 0.609507 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h | /*! \file
    \brief This defines a "fragment" iterator for visiting the fragments of a warp tile
    that participate in one warp-level mma operation.
    Typically, this is used to access the accumulator tile/fragment of a warp-level mma operation.
    The accumulator tile is then partitioned into smaller tiles/fragments that can be fed into
    the next warp-level mma operation.
    This iterator is necessary to accomplish warp-level mma fusion, where the accumulator tile is
    reused as a multiplicand tile for the next mma.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/numeric_conversion.h"
namespace cutlass {
namespace gemm {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Size of the accumulation tile shape (concept: MatrixShape)
typename AccumulatorShape_,
/// KBlocks columns to compute residual
int KBlocksColumn_,
/// Accumulator Element type
typename ElementAccumulator_,
/// Element type
typename Element_,
/// Layout of operand in memory
typename Layout_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Output operation on the fragment
typename OutputOp_>
class MmaTensorOpFragmentIterator;
// Partial specialization for col-major accumulator tile
template <
/// Shape of warp tile to load (concept: MatrixShape)
typename Shape_,
/// Shape of the warp accumulation tile (concept: MatrixShape)
typename AccumulatorShape_,
/// KBlocks columns to compute residual
int KBlocksColumn_,
/// Accumulator Element type
typename ElementAccumulator_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Output operation on fragment
typename OutputOp_>
class MmaTensorOpFragmentIterator<Shape_, AccumulatorShape_, KBlocksColumn_, ElementAccumulator_, Element_,
cutlass::layout::ColumnMajor,
InstructionShape_, OutputOp_> {
public:
/// Shape of warp tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Shape of the warp accumulation tile (concept: MatrixShape)
using AccumulatorShape = AccumulatorShape_;
/// KBlocks columns to compute residual
static int const kKBlockColumn = KBlocksColumn_;
/// Accumulator Element type
using ElementAccumulator = ElementAccumulator_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Output operation on fragment
using OutputOp = OutputOp_;
/// Number of participating threads
static int const kThreads = 32;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
static_assert(
AccumulatorShape::kRow == Shape::kRow,
"Rows of Warp Accumulator must be the same as rows of warp");
static_assert(
!(AccumulatorShape::kColumn % Shape::kColumn),
"Shape of Warp Accumulator must be divisible by warp shape.");
static_assert(
!(kKBlockColumn % Shape::kColumn),
"KBlock size must be divisible by warp shape.");
/// Number of times this iterator can be incremented
static int const kIterations = AccumulatorShape::kCount / Shape::kCount;
};
private:
static int const kElementsPerAccess = InstructionShape::kM * InstructionShape::kN / kThreads;
/// Number of mma operations performed by a warp
using MmaIterations = MatrixShape<Shape::kRow / InstructionShape::kM,
Shape::kColumn / InstructionShape::kN>;
/// Number of mma operations performed by the entire accumulator
using AccumulatorIterations = MatrixShape<AccumulatorShape::kRow / InstructionShape::kM,
AccumulatorShape::kColumn / InstructionShape::kN>;
/// Number of K iterations
static int const kKBlockIterations = (AccumulatorShape::kColumn + kKBlockColumn - 1) / kKBlockColumn;
static int const kResidualColumn = AccumulatorShape::kColumn - (kKBlockIterations - 1) * kKBlockColumn;
static int const kKBlockColumnIterations = kKBlockColumn / Shape::kColumn
* (AccumulatorShape::kRow / Shape::kRow);
static int const kResidualIndex = kResidualColumn / Shape::kColumn
* (AccumulatorShape::kRow / Shape::kRow);
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<Element, Shape::kCount / kThreads>;
/// Accumulator Fragment object
using AccumulatorFragment = Array<ElementAccumulator, AccumulatorShape::kCount / kThreads>;
/// Scale Bias Element Type
using ElementScaleBias = typename OutputOp::ElementCompute;
/// Scale Bias Fragment object
using ScaleBiasFragment = Array<ElementScaleBias, InstructionShape::kM * InstructionShape::kK / kThreads>;
private:
/// Internal access type
using AccessType = Array<ElementAccumulator, kElementsPerAccess>;
using FragmentAccessType = Array<Element, kElementsPerAccess>;
using ScaleBiasAccessType = Array<ElementScaleBias, kElementsPerAccess>;
private:
//
// Data members
//
/// Accumulator tile
AccessType const *accumulators_;
/// Internal index
int index_;
/// Used to access residual tile first
bool is_residual_tile_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator(AccumulatorFragment const &accum)
: accumulators_(reinterpret_cast<AccessType const *>(&accum)),
index_(0), is_residual_tile_(true) {}
/// Add offset
CUTLASS_HOST_DEVICE
void add_offset(int index_offset) {
index_ += index_offset;
if(is_residual_tile_ && index_ >= kKBlockColumnIterations) {
index_ = index_ - kKBlockColumnIterations + kResidualIndex;
is_residual_tile_ = false;
}
}
/// Increments
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator &operator++() {
add_offset(1);
return *this;
}
/// Decrements
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator &operator--() {
add_offset(-1);
return *this;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag, OutputOp output_op) const {
if (output_op.is_source_needed()) //beta must be zero
assert(0);
FragmentAccessType *frag_ptr = reinterpret_cast<FragmentAccessType *>(&frag);
int index = index_ * MmaIterations::kCount;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; n++) {
for (int m = 0; m < MmaIterations::kRow; m++) {
int accumulator_access_offset =
n * AccumulatorIterations::kRow + m + index;
frag_ptr[m * MmaIterations::kColumn + n].clear();
if(!(is_residual_tile_ && index_ >= kResidualIndex))
frag_ptr[m * MmaIterations::kColumn + n] = output_op(accumulators_[accumulator_access_offset]);
}
}
}
/// Loads a fragment from the referenced part of the accumulator tile
/// Then apply per-channel scale and bias
CUTLASS_HOST_DEVICE
void load(Fragment &frag, ScaleBiasFragment &scale,
ScaleBiasFragment &bias, OutputOp output_op) const {
if (output_op.is_source_needed()) //beta must be zero
assert(0);
FragmentAccessType *frag_ptr = reinterpret_cast<FragmentAccessType *>(&frag);
ScaleBiasAccessType * scale_ptr = reinterpret_cast<ScaleBiasAccessType *>(&scale);
ScaleBiasAccessType * bias_ptr = reinterpret_cast<ScaleBiasAccessType *>(&bias);
int index = index_ * MmaIterations::kCount;
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; n++) {
for (int m = 0; m < MmaIterations::kRow; m++) {
int accumulator_access_offset =
n * AccumulatorIterations::kRow + m + index;
frag_ptr[m * MmaIterations::kColumn + n].clear();
if(!(is_residual_tile_ && index_ >= kResidualIndex))
frag_ptr[m * MmaIterations::kColumn + n] =
output_op(accumulators_[accumulator_access_offset],
scale_ptr[n] /*scale*/, bias_ptr[n] /*bias*/);
}
}
}
};
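// Usage sketch (illustrative; FragmentIterator, accum, and output_op are placeholder
// names for a concrete instantiation of this class): the warp accumulator of a first
// mma is visited fragment-by-fragment so each piece can be fed as a multiplicand of a
// second, fused warp-level mma.
//
//   FragmentIterator frag_iter(accum);                 // accum: AccumulatorFragment of the first mma
//   typename FragmentIterator::Fragment frag;
//   CUTLASS_PRAGMA_UNROLL
//   for (int k = 0; k < FragmentIterator::Policy::kIterations; ++k) {
//     frag_iter.load(frag, output_op);                 // unpack and apply the elementwise OutputOp
//     ++frag_iter;
//     // ... use 'frag' as a multiplicand fragment of the next warp-level mma ...
//   }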
// Partial specialization for row-major accumulator tile
template <
/// Shape of warp tile to load (concept: MatrixShape)
typename Shape_,
/// Shape of the warp accumulation tile (concept: MatrixShape)
typename AccumulatorShape_,
/// KBlocks columns to compute residual
int KBlocksColumn_,
/// Accumulator Element type
typename ElementAccumulator_,
/// Element type
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Output operation on fragment
typename OutputOp_>
class MmaTensorOpFragmentIterator<Shape_, AccumulatorShape_, KBlocksColumn_, ElementAccumulator_, Element_,
cutlass::layout::RowMajor,
InstructionShape_, OutputOp_> {
public:
/// Shape of warp tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Shape of the warp accumulation tile (concept: MatrixShape)
using AccumulatorShape = AccumulatorShape_;
/// KBlocks columns to compute residual
static int const kKBlockColumn = KBlocksColumn_;
/// Accumulator Element type
using ElementAccumulator = ElementAccumulator_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Output operation on fragment
using OutputOp = OutputOp_;
/// Number of participating threads
static int const kThreads = 32;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
static_assert(
AccumulatorShape::kRow == Shape::kRow,
"Rows of Warp Accumulator must be the same as rows of warp");
static_assert(
!(AccumulatorShape::kColumn % Shape::kColumn),
"Shape of Warp Accumulator must be divisible by warp shape.");
static_assert(
!(kKBlockColumn % Shape::kColumn),
"KBlock size must be divisible by warp shape.");
/// Number of times this iterator can be incremented
static int const kIterations = AccumulatorShape::kCount / Shape::kCount;
};
private:
static int const kRowsPerIteration = 8;
static int const kColumnsPerIteration = 16;
static int const kElementsPerIteration = kRowsPerIteration * InstructionShape::kN / kThreads;
static int const kElementsPerAccess = kRowsPerIteration * kColumnsPerIteration / kThreads;
static int const kIterationsPerAccess = kElementsPerAccess / kElementsPerIteration;
// Number of iterations per actual instruction
static int const kIterationsPerInstruction = InstructionShape::kM / kRowsPerIteration;
static int const kAccessStride = kIterationsPerInstruction;
/// Number of mma operations performed by a warp
using MmaIterations = MatrixShape<Shape::kRow / InstructionShape::kM,
Shape::kColumn / InstructionShape::kN>;
/// Number of mma operations performed by the entire accumulator
using AccumulatorIterations = MatrixShape<AccumulatorShape::kRow / InstructionShape::kM,
AccumulatorShape::kColumn / InstructionShape::kN>;
/// Number of Accesses in a warp
using AccessIterations = MatrixShape<MmaIterations::kRow * kIterationsPerInstruction,
MmaIterations::kColumn / kIterationsPerAccess>;
/// Number of K iterations
static int const kKBlockIterations = (AccumulatorShape::kColumn + kKBlockColumn - 1) / kKBlockColumn;
static int const kResidualColumn = AccumulatorShape::kColumn - (kKBlockIterations - 1) * kKBlockColumn;
static int const kKBlockColumnIterations = kKBlockColumn / Shape::kColumn;
static int const kResidualIndex = kResidualColumn / Shape::kColumn;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
/// This is the fragment size produced by one access of the iterator.
using Fragment = Array<Element, Shape::kCount / kThreads>;
/// Accumulator Fragment object
using AccumulatorFragment = Array<ElementAccumulator, AccumulatorShape::kCount / kThreads>;
/// Scale Bias Element Type
using ElementScaleBias = typename OutputOp::ElementCompute;
/// Scale Bias Fragment object
using ScaleBiasFragment = Array<ElementScaleBias, InstructionShape::kM * InstructionShape::kK / kThreads>;
private:
/// Internal access type
using AccessType = Array<ElementAccumulator, kElementsPerIteration>;
using FragmentAccessType = Array<Element, kElementsPerIteration>;
using ScaleBiasAccessType = Array<ElementScaleBias, kElementsPerIteration>;
private:
//
// Data members
//
/// Accumulator tile
AccessType const *accumulators_;
/// Internal index
int index_;
/// Used to access residual tile first
bool is_residual_tile_;
public:
/// Constructs an iterator
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator(AccumulatorFragment const &accum)
: accumulators_(reinterpret_cast<AccessType const *>(&accum)),
index_(0), is_residual_tile_(true) {}
/// Add offset
CUTLASS_HOST_DEVICE
void add_offset(int index_offset) {
index_ += index_offset;
if(is_residual_tile_ && index_ >= kKBlockColumnIterations) {
index_ = index_ - kKBlockColumnIterations + kResidualIndex;
is_residual_tile_ = false;
}
}
/// Increments
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator &operator++() {
add_offset(1);
return *this;
}
/// Decrements
CUTLASS_HOST_DEVICE
MmaTensorOpFragmentIterator &operator--() {
add_offset(-1);
return *this;
}
CUTLASS_HOST_DEVICE
void set_index(int idx) {
index_ = idx;
}
/// Loads a fragment from the referenced part of the accumulator tile
CUTLASS_HOST_DEVICE
void load(Fragment &frag, OutputOp output_op) const {
if (output_op.is_source_needed()) //beta must be zero
assert(0);
FragmentAccessType *frag_ptr = reinterpret_cast<FragmentAccessType *>(&frag);
int index = index_ * AccessIterations::kCount;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < AccessIterations::kCount; i++) {
int accumulator_access_offset = index / AccessIterations::kCount * (MmaIterations::kColumn * kIterationsPerInstruction) +
(index % AccessIterations::kCount) / (AccessIterations::kColumn * kIterationsPerInstruction) *
AccumulatorIterations::kColumn * kIterationsPerInstruction +
(index % (AccessIterations::kColumn * kIterationsPerInstruction)) / kIterationsPerInstruction *
(kIterationsPerInstruction * kIterationsPerAccess) +
(index % kIterationsPerInstruction);
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < kIterationsPerAccess; j++) {
frag_ptr[i*kIterationsPerAccess + j].clear();
if(!(is_residual_tile_ && index_ >= kResidualIndex))
frag_ptr[i*kIterationsPerAccess + j] = output_op(accumulators_[accumulator_access_offset + j * kAccessStride]);
}
index++;
}
}
/// Loads a fragment from the referenced part of the accumulator tile
/// Then apply per-channel scale and bias
CUTLASS_HOST_DEVICE
void load(Fragment &frag, ScaleBiasFragment &scale,
ScaleBiasFragment & bias, OutputOp output_op) const {
if (output_op.is_source_needed()) //beta must be zero
assert(0);
FragmentAccessType *frag_ptr = reinterpret_cast<FragmentAccessType *>(&frag);
ScaleBiasAccessType * scale_ptr = reinterpret_cast<ScaleBiasAccessType *>(&scale);
ScaleBiasAccessType * bias_ptr = reinterpret_cast<ScaleBiasAccessType *>(&bias);
int index = index_ * AccessIterations::kCount;
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < AccessIterations::kCount; i++) {
int accumulator_access_offset = index / AccessIterations::kCount * (MmaIterations::kColumn * kIterationsPerInstruction) +
(index % AccessIterations::kCount) / (AccessIterations::kColumn * kIterationsPerInstruction) *
AccumulatorIterations::kColumn * kIterationsPerInstruction +
(index % (AccessIterations::kColumn * kIterationsPerInstruction)) / kIterationsPerInstruction *
(kIterationsPerInstruction * kIterationsPerAccess) +
(index % kIterationsPerInstruction);
int scale_bias_offset = (index
% (kIterationsPerInstruction * AccessIterations::kColumn))
* kIterationsPerAccess;
CUTLASS_PRAGMA_UNROLL
for (int j = 0; j < kIterationsPerAccess; j++) {
frag_ptr[i*kIterationsPerAccess + j].clear();
if(!(is_residual_tile_ && index_ >= kResidualIndex))
frag_ptr[i*kIterationsPerAccess + j] = output_op(
accumulators_[accumulator_access_offset + j * kAccessStride],
scale_ptr[scale_bias_offset + j], bias_ptr[scale_bias_offset + j]);
}
index++;
}
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 18,643 | C | 34.243856 | 131 | 0.661321 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/softmax_scale_bias_transform.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level per-channel softmax before
matrix multiply-accumulate operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename FragmentActivations, typename FragmentNormSum>
struct SoftmaxScaleBiasTransform {
using T = typename FragmentActivations::Element;
static int const NumActivations = FragmentActivations::kElements;
static int const NumNormSum = FragmentNormSum::kElements;
static int const MmaElements = 2;
// One element has one scale and one bias
static int const MmaScaleBiasPair = 2;
// 16816 has 2 columns and 2 rows
static int const MmaCols = 2;
static int const MmaRows = 2;
using MmaOperand = Array<T, MmaElements>;
using NormSumOperand = Array<__half2, MmaScaleBiasPair>;
CUTLASS_DEVICE
void transform(MmaOperand &activations,
NormSumOperand const &norm_sum) {
__half2* packed_activations = reinterpret_cast<__half2*>(&activations);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < MmaElements / 2; ++i) {
__half2 out = ::h2exp(__hsub2(packed_activations[i], norm_sum[2*i]));
packed_activations[i] = __hmul2(out, norm_sum[2*i + 1]);
}
}
CUTLASS_DEVICE
void operator()(FragmentActivations &activations,
FragmentNormSum const &norm_sum) {
MmaOperand *ptr_activations = reinterpret_cast<MmaOperand *>(&activations);
NormSumOperand const *ptr_norm_sum =
reinterpret_cast<NormSumOperand const *>(&norm_sum);
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < (NumActivations / MmaElements); ++i) {
transform(ptr_activations[i],
ptr_norm_sum[i / (MmaCols * MmaRows) * MmaRows + i % MmaRows]);
}
}
};
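// Worked example (illustrative): for an activation value x and its matching
// (norm, inv_sum) pair (n, s) taken from FragmentNormSum, the transform above computes
//
//   out = exp(x - n) * s
//
// i.e. a numerically stable softmax in which n is the row maximum and s is the
// reciprocal of the row sum of exponentials, evaluated two halves at a time via
// h2exp/__hsub2/__hmul2.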
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 4,610 | C | 38.076271 | 100 | 0.637961 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/mma_complex_tensor_op_fast_f32.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/complex.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/functional.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
#include "cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
namespace detail {
template <
/// Data type of real & imag members of complex numbers in the SourceFragment
typename RealElement,
/// Destination fragment required by the mma operation
typename DestinationFragment,
/// Source fragment holding complex<RealElement> elements
typename SourceFragment,
/// Number of mma operations performed
typename MmaIterations,
/// Shape of operand elements
typename MmaOperandShape,
/// Complex transform on A operand
ComplexTransform Transform_,
/// Operand A or Operand B
Operand Operand_,
/// Floating-point rounding style for big part
FloatRoundStyle RoundBig_,
/// Floating-point rounding style for small part
FloatRoundStyle RoundSmall_>
struct UnpackComplexConvertAndPackForMmaFastF32;
// Partial specialization for OperandA and Congruous smem layout
template <
typename RealElement,
typename DestinationFragment,
typename SourceFragment,
typename MmaIterations,
typename MmaOperandShape,
ComplexTransform Transform_,
FloatRoundStyle RoundBig_,
FloatRoundStyle RoundSmall_>
struct UnpackComplexConvertAndPackForMmaFastF32 <
RealElement,
DestinationFragment,
SourceFragment,
MmaIterations,
MmaOperandShape,
Transform_,
Operand::kA,
RoundBig_,
RoundSmall_> {
//
// Type definitions
//
static Operand const kOperand = Operand::kA;
static ComplexTransform const kTransform = Transform_;
static FloatRoundStyle const kRoundBig = RoundBig_;
static FloatRoundStyle const kRoundSmall = RoundSmall_;
// Data type of elements in the destination fragment
using MmaElement = typename DestinationFragment::Element;
  // Numeric converter MmaElementBig, MmaElementSmall <= RealElement
using Converter = NumericConverterFastF32<kRoundBig, kRoundSmall>;
// Operand layout parameters
using SourceFragmentLayout = layout::ColumnMajor;
static int const kLdm = MmaIterations::kRow * MmaOperandShape::kRow;
// BigSmall Fragment holding two TF32 elements (big, small) for every float
using BigSmallFragment = Array<MmaElement, 2>;
  /// Index in fragments for the big and small parts
static int const kBigIndex = 0;
static int const kSmallIndex = 1;
/// Ctor
CUTLASS_DEVICE
UnpackComplexConvertAndPackForMmaFastF32() {}
CUTLASS_DEVICE
void operator()(DestinationFragment *dest, SourceFragment const &source) {
Converter convert_op;
SourceFragmentLayout layout(kLdm);
DestinationFragment *dest_big_ = reinterpret_cast<DestinationFragment*>(dest);
DestinationFragment *dest_small_ = reinterpret_cast<DestinationFragment*>(&dest[MmaIterations::kRow * 2]);
CUTLASS_PRAGMA_UNROLL
for(int i=0; i<MmaIterations::kRow; i++) {
int pos = 0;
CUTLASS_PRAGMA_UNROLL
for(int c=0; c<MmaOperandShape::kColumn; c++) {
CUTLASS_PRAGMA_UNROLL
for(int r=0; r<MmaOperandShape::kRow; r++) {
// Logical position of element in source fragment
int row = r + i * MmaOperandShape::kRow;
int col = c;
// Access complex<RealElement> and apply rounding on real and imag parts
BigSmallFragment a = convert_op(source[layout(MatrixCoord{row,col})].real());
BigSmallFragment b = convert_op(source[layout(MatrixCoord{row,col})].imag());
// Unpack rounded complex<MmaElement> and pack into DestinationFragment for mma operation
dest_big_[i][pos] = a[kBigIndex];
dest_big_[i+MmaIterations::kRow][pos] = (kTransform == ComplexTransform::kConjugate ? -b[kBigIndex] : b[kBigIndex]);
// Unpack rounded complex<MmaElement> and pack into DestinationFragment for mma operation
dest_small_[i][pos] = a[kSmallIndex];
dest_small_[i+MmaIterations::kRow][pos] = (kTransform == ComplexTransform::kConjugate ? -b[kSmallIndex] : b[kSmallIndex]);
// Next position
pos++;
}
}
}
}
};
// Partial specialization for OperandB and Congruous smem layout
template <
typename RealElement,
typename DestinationFragment,
typename SourceFragment,
typename MmaIterations,
typename MmaOperandShape,
ComplexTransform Transform_,
FloatRoundStyle RoundBig_,
FloatRoundStyle RoundSmall_>
struct UnpackComplexConvertAndPackForMmaFastF32 <
RealElement,
DestinationFragment,
SourceFragment,
MmaIterations,
MmaOperandShape,
Transform_,
Operand::kB,
RoundBig_,
RoundSmall_> {
//
// Type definitions
//
static Operand const kOperand = Operand::kB;
static ComplexTransform const kTransform = Transform_;
static FloatRoundStyle const kRoundBig = RoundBig_;
static FloatRoundStyle const kRoundSmall = RoundSmall_;
// Data type of elements in the destination fragment
using MmaElement = typename DestinationFragment::Element;
  // Numeric converter MmaElementBig, MmaElementSmall <= RealElement
using Converter = NumericConverterFastF32<kRoundBig, kRoundSmall>;
// Operand layout parameters
using SourceFragmentLayout = layout::RowMajor;
static int const kLdm = MmaIterations::kColumn * MmaOperandShape::kColumn;
// BigSmall Fragment holding two TF32 elements (big, small) for every float
using BigSmallFragment = Array<MmaElement, 2>;
  /// Index in fragments for the big and small parts
static int const kBigIndex = 0;
static int const kSmallIndex = 1;
/// Ctor
CUTLASS_DEVICE
UnpackComplexConvertAndPackForMmaFastF32() {}
CUTLASS_HOST_DEVICE
void operator()(DestinationFragment *dest, SourceFragment const &source) {
Converter convert_op;
SourceFragmentLayout layout(kLdm);
DestinationFragment *dest_big_ = reinterpret_cast<DestinationFragment*>(dest);
DestinationFragment *dest_small_ = reinterpret_cast<DestinationFragment*>(&dest[MmaIterations::kColumn * 2]);
CUTLASS_PRAGMA_UNROLL
for(int i=0; i<MmaIterations::kColumn; i++) {
int pos = 0;
CUTLASS_PRAGMA_UNROLL
for(int c=0; c<MmaOperandShape::kColumn; c++) {
CUTLASS_PRAGMA_UNROLL
for(int r=0; r<MmaOperandShape::kRow; r++) {
// Logical position of element in source fragment
int row = r;
int col = c + i * MmaOperandShape::kColumn;
        // Access complex<RealElement> and apply rounding on real and imag parts
BigSmallFragment a = convert_op(source[layout(MatrixCoord{row,col})].real());
BigSmallFragment b = convert_op(source[layout(MatrixCoord{row,col})].imag());
// Unpack rounded complex<MmaElement> and pack into DestinationFragment for mma operation
dest_big_[i][pos] = a[kBigIndex];
dest_big_[i+MmaIterations::kColumn][pos] = (kTransform == ComplexTransform::kConjugate ? -b[kBigIndex] : b[kBigIndex]);
// Unpack rounded complex<MmaElement> and pack into DestinationFragment for mma operation
dest_small_[i][pos] = a[kSmallIndex];
dest_small_[i+MmaIterations::kColumn][pos] = (kTransform == ComplexTransform::kConjugate ? -b[kSmallIndex] : b[kSmallIndex]);
// next position
pos++;
}
}
}
}
};
} // namespace detail
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename RealElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename RealElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename RealElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA = ComplexTransform::kNone,
/// Complex transform on B operand
ComplexTransform TransformB = ComplexTransform::kNone,
/// Used for partial specialization
typename Enable = bool
>
class MmaComplexTensorOpFastF32;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex*complex+complex => complex:
// Operands data type: complex<float>
// Rounding: float -> tfloat32_t (round half_ulp_truncate nearest)
// Math instruction: mma.sync.aligned.m16n8k8.f32.tf32.tf32.f32
// Output data type: complex<float>
//
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB,
/// Used for partial specialization
typename Enable
>
class MmaComplexTensorOpFastF32<
Shape_,
complex<float>,
LayoutA_,
complex<float>,
LayoutB_,
complex<float>,
LayoutC_,
Policy_,
TransformA,
TransformB,
Enable> {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of members of complex multiplicand A
using RealElementA = float;
/// Data type of multiplicand A
using ElementA = complex<RealElementA>;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of members of complex multiplicand B
using RealElementB = float;
/// Data type of multiplicand B
using ElementB = complex<RealElementB>;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of members of complex accumulator matrix C
using RealElementC = float;
/// Data type of accumulator matrix C
using ElementC = complex<RealElementC>;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Underlying arch tag
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Indicates math operator
using MathOperator = arch::OpMultiplyAddComplexFastF32;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
  /// Tune the F32-to-TF32 big/small conversion for the complex<float> operation.
  /// Different combinations of big/small rounding styles trade off speed against
  /// accuracy. Generally, using round_half_ulp_truncate can improve performance
  /// but hurt accuracy.
using ComplexFastF32 = FastF32 <
FloatRoundStyle::round_toward_zero, // kRoundBigA
FloatRoundStyle::round_half_ulp_truncate, // kRoundSmallA
FloatRoundStyle::round_toward_zero, // kRoundBigB
FloatRoundStyle::round_half_ulp_truncate, // kRoundSmallB
TensorFloat32Op::k3xTF32 // Number of TF32 operations
>;
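  // Worked sketch (illustrative): the fast-F32 path splits each float x into two
  // tfloat32_t values, x_big = tf32(x) and x_small = tf32(x - float(x_big)), so
  // that x ~= x_big + x_small. A real-valued product is then approximated by
  // three TF32 mma instructions,
  //
  //   a * b ~= a_big * b_big + a_big * b_small + a_small * b_big,
  //
  // and the k4xTF32 setting adds the fourth term a_small * b_small for higher
  // accuracy at extra cost (see operator() below).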
  /// Index in fragments for the big and small parts
static int const kBigIndex = 0;
static int const kSmallIndex = 1;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow,
32,
1
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
// (4 times the original FragmentA::kElements)
// (real_big), (imag_big), (real_small), (imag_small)
using TransformedFragmentA = Array<typename ArchMmaOperator::ElementA,
FragmentA::kElements * 2 * 2>;
// Fragment bisecting big and small sections
// (real_big, imag_big), (real_small, imag_small)
using AccessTypeFragmentA = Array<typename ArchMmaOperator::ElementA,
FragmentA::kElements * 2>;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kColumn,
32,
1
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
// (4 times the original FragmentB::kElements)
// (real_big), (imag_big), (real_small), (imag_small)
using TransformedFragmentB = Array<typename ArchMmaOperator::ElementB,
FragmentB::kElements * 2 * 2>;
// Fragment bisecting big and small sections
// (real_big, imag_big), (real_small, imag_small)
using AccessTypeFragmentB = Array<typename ArchMmaOperator::ElementB,
FragmentB::kElements * 2>;
static_assert(
!(Shape::kM % ArchMmaOperator::Shape::kM) &&
!(Shape::kN % ArchMmaOperator::Shape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
  /// Number of complex product operations performed (one complex product needs four mma instructions)
using MmaIterations = MatrixShape<
Shape::kM / ArchMmaOperator::Shape::kM,
Shape::kN / ArchMmaOperator::Shape::kN
>;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
ElementC,
LayoutC,
typename ArchMmaOperator::Shape,
typename Policy::OpDelta>;
/// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this
/// storage arrangement is to be considered 'planar complex' in the sense that all real-valued
/// parts are stored consecutively followed by all imaginary parts. This matches the structure
/// of Tensor Cores which are always real-valued matrix multiplies.
using FragmentC = typename IteratorC::Fragment;
//
// Alias types for underlying real-valued matrix multiply operator
//
using InstMmaOperandA = typename ArchMmaOperator::FragmentA;
using InstMmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
static_assert(platform::is_same<cutlass::gemm::GemmShape<16, 8, 8>, typename ArchMmaOperator::Shape>::value,
"This implementation only supports mma.m16n8k8 math instructions.");
  static_assert(InstMmaOperandA::kElements == 4,
    "This implementation only supports math instructions in which exactly four elements are needed for the A operand. "
    "We can generalize this later.");
  static_assert(InstMmaOperandB::kElements == 2,
    "This implementation only supports math instructions in which exactly two elements are needed for the B operand. "
    "We can generalize this later.");
private:
//
// Data members
//
/// Underlying real-valued matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaComplexTensorOpFastF32() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
TransformedFragmentA const &A,
TransformedFragmentB const &B,
FragmentC const &C
) const {
AccessTypeFragmentA const *complex_A = reinterpret_cast<AccessTypeFragmentA const*>(&A);
AccessTypeFragmentB const *complex_B = reinterpret_cast<AccessTypeFragmentB const*>(&B);
//
// Accumulate in place
//
D = C;
complex_mma_operator(D, complex_A[kSmallIndex], complex_B[kBigIndex], D);
complex_mma_operator(D, complex_A[kBigIndex], complex_B[kSmallIndex], D);
complex_mma_operator(D, complex_A[kBigIndex], complex_B[kBigIndex], D);
if (ComplexFastF32::kPrecision == TensorFloat32Op::k4xTF32)
complex_mma_operator(D, complex_A[kSmallIndex], complex_B[kSmallIndex], D);
}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void complex_mma_operator(
FragmentC &D,
AccessTypeFragmentA const &complex_A,
AccessTypeFragmentB const &complex_B,
FragmentC const &C
) const {
// Instruction Operands A & B holding real part followed by imaginary part for mma operations
InstMmaOperandA const *operand_A = reinterpret_cast<InstMmaOperandA const *>(&complex_A);
InstMmaOperandB const *operand_B = reinterpret_cast<InstMmaOperandB const *>(&complex_B);
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
// mma(accum.real(), a.real(), b.real(), accum.real());
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Real-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_A[m], operand_B[n], *accum);
}
// mma(accum.imag(), a.real(), b.imag(), accum.imag());
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Complex-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_A[m], operand_B[n+MmaIterations::kColumn], *accum);
}
// mma(accum.real(), a.imag(), -b.imag(), accum.real())
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// negate OperandB to accumulate -(a.imag()*b.imag())
        // negating OperandB emits fewer instructions than negating OperandA, as OperandB has fewer elements
negate<InstMmaOperandB> negate_op;
// Real-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_A[m+MmaIterations::kRow], negate_op(operand_B[n+MmaIterations::kColumn]), *accum);
}
// mma(accum.imag(), a.imag(), b.real(), accum.imag())
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Complex-valued accumulator part
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_A[m+MmaIterations::kRow], operand_B[n], *accum);
}
}
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
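    // Presumably, each converter unpacks the complex-valued source fragment, rounds
    // every scalar to a 'big' tf32 value plus a 'small' residual (using the rounding
    // styles selected by ComplexFastF32), and repacks the result with all real parts
    // followed by all imaginary parts, as consumed by complex_mma_operator() above.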
detail::UnpackComplexConvertAndPackForMmaFastF32 <
RealElementA,
InstMmaOperandA,
FragmentA,
MmaIterations,
MatrixShape<2, 2>,
kTransformA,
Operand::kA,
ComplexFastF32::kRoundBigA,
ComplexFastF32::kRoundSmallA> convert_A;
detail::UnpackComplexConvertAndPackForMmaFastF32 <
RealElementB,
InstMmaOperandB,
FragmentB,
MmaIterations,
MatrixShape<2, 1>,
kTransformB,
Operand::kB,
ComplexFastF32::kRoundBigB,
ComplexFastF32::kRoundSmallB> convert_B;
// Convert Fragment[A|B] holding complex<RealElement[A|B]> to InstMmaOperand[A|B] holding InstMmaOperand[A|B]::Element
convert_A(reinterpret_cast<InstMmaOperandA *>(&dst_A), A);
convert_B(reinterpret_cast<InstMmaOperandB *>(&dst_B), B);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 23,132 | C | 33.838855 | 135 | 0.676422 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/layernorm_scale_bias_transform.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level per channel scale+bias+relu before
matrix multiply-accumulate operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/platform/platform.h"
#include "cutlass/numeric_conversion.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <typename FragmentActivations, typename FragmentVarMean, typename FragmentGammaBeta>
struct LayernormScaleBiasTransform {
using T = typename FragmentActivations::Element;
static int const NumActivations = FragmentActivations::kElements;
static int const NumVarMean = FragmentVarMean::kElements;
static int const NumGammaBeta = FragmentGammaBeta::kElements;
static int const MmaElements = 2;
// One element has one scale and one bias
static int const MmaScaleBiasPair = 2;
  // The mma.m16n8k16 instruction tile has 2 columns and 2 rows
static int const MmaCols = 2;
static int const MmaRows = 2;
using MmaOperand = Array<T, MmaElements>;
using VarMeanOperand = Array<__half2, MmaScaleBiasPair>;
using GammaBetaOperand = Array<T, MmaElements * MmaScaleBiasPair>;
CUTLASS_DEVICE
void transform(MmaOperand &activations,
VarMeanOperand const &var_mean,
GammaBetaOperand const &gamma_beta) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
uint32_t *ptr_activations = reinterpret_cast<uint32_t *>(&activations);
uint32_t const *ptr_var_mean = reinterpret_cast<uint32_t const *>(&var_mean);
uint32_t const *ptr_gamma_beta = reinterpret_cast<uint32_t const *>(&gamma_beta);
    // Apply the per-channel layernorm transform using packed FP16x2 FMAs:
    //   activations = gamma_beta[0] * (var_mean[0] * activations + var_mean[1]) + gamma_beta[1]
    // where var_mean is expected to hold the inverse standard deviation and the
    // corresponding pre-scaled mean term. Each fma.rn.f16x2 processes a pair of FP16
    // values, which requires the channel count C to be an even number.
asm volatile(
"{\n\t"
" fma.rn.f16x2 %0, %1, %2, %3;\n"
" fma.rn.f16x2 %0, %4, %0, %5;\n"
"}\n"
: "=r"(ptr_activations[0])
: "r"(ptr_var_mean[0]), "r"(ptr_activations[0]),
"r"(ptr_var_mean[1]),
"r"(ptr_gamma_beta[0]), "r"(ptr_gamma_beta[1]));
#else
// TODO: write emulation code
assert(0);
#endif
}
CUTLASS_DEVICE
void operator()(FragmentActivations &activations,
FragmentVarMean const &var_mean,
FragmentGammaBeta const &gamma_beta) {
MmaOperand *ptr_activations = reinterpret_cast<MmaOperand *>(&activations);
VarMeanOperand const *ptr_var_mean =
reinterpret_cast<VarMeanOperand const *>(&var_mean);
GammaBetaOperand const *ptr_gamma_beta =
reinterpret_cast<GammaBetaOperand const *>(&gamma_beta);
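    // Each iteration transforms one pair of FP16 activations. The index arithmetic
    // below is intended to pick the var/mean fragment belonging to that pair's row
    // and the gamma/beta fragment belonging to its column within the 2x2 tiling of
    // the m16n8k16 operand layout.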
CUTLASS_PRAGMA_UNROLL
for (int i = 0; i < (NumActivations / MmaElements); ++i) {
transform(ptr_activations[i],
ptr_var_mean[i / (MmaCols * MmaRows) * MmaRows + i % MmaRows],
ptr_gamma_beta[(i / MmaScaleBiasPair) % MmaCols]);
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 5,725 | C | 39.609929 | 100 | 0.634236 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/mma_tensor_op_sm70.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
This is a work in progress.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/mma.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm70.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Structure to compute the matrix product targeting Volta Tensor Cores.
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Used for partial specialization
typename Enable = bool
>
class MmaVoltaTensorOp {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = ElementA_;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = ElementB_;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = ElementC_;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Architecture tag
using ArchTag = arch::Sm70;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Indicates math operator
using MathOperator = typename ArchMmaOperator::Operator;
/// Underlying instruction shape
using InstructionShape = typename ArchMmaOperator::Shape;
/// Complex transform on A operand
static ComplexTransform const kTransformA = ComplexTransform::kNone;
/// Complex transform on B operand
static ComplexTransform const kTransformB = ComplexTransform::kNone;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
/// interleaved 32x32 tiles
using InterleavedTileShape = GemmShape<32, 32, 4>;
static_assert(!(Shape::kM % InterleavedTileShape::kM) &&
!(Shape::kN % InterleavedTileShape::kN),
"Shape must be a multiple of InterleavedTileShape.");
public:
/// Iterates over the A operand in memory
using IteratorA = MmaVoltaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<
ArchMmaOperator::Shape::kM,
ArchMmaOperator::Shape::kK
>,
Policy::OpDelta::kRow,
kThreadCount
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Iterates over the B operand in memory
using IteratorB = MmaVoltaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<
ArchMmaOperator::Shape::kK,
ArchMmaOperator::Shape::kN
>,
Policy::OpDelta::kRow,
kThreadCount
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Iterates over the C operand in memory
using IteratorC = MmaVoltaTensorOpAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
ElementC,
LayoutC,
typename ArchMmaOperator::Shape,
typename Policy::OpDelta
>;
/// Storage for C tile
using FragmentC = typename IteratorC::Fragment;
private:
static_assert(
!(Shape::kM % ArchMmaOperator::Shape::kM) &&
!(Shape::kN % ArchMmaOperator::Shape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<
InterleavedTileShape::kM / ArchMmaOperator::Shape::kM,
InterleavedTileShape::kN / ArchMmaOperator::Shape::kN
>;
using TileIterations = MatrixShape<
Shape::kM / InterleavedTileShape::kM,
Shape::kN / InterleavedTileShape::kN
>;
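  // TileIterations counts interleaved 32x32 blocks covering the warp tile, while
  // MmaIterations counts the underlying arch-level mma operations inside each
  // interleaved block; together they enumerate every accumulator fragment.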
// Whether matrix B is reordered
bool reorder_B_;
public:
/// Underlying matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaVoltaTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C) {
using MmaOperandA = typename ArchMmaOperator::FragmentA;
using MmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
D = C;
MmaOperandA const *ptr_A = reinterpret_cast<MmaOperandA const *>(&A);
MmaOperandB const *ptr_B = reinterpret_cast<MmaOperandB const *>(&B);
MmaOperandC *ptr_D = reinterpret_cast<MmaOperandC *>(&D);
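    // Visit every (outer tile, inner mma) position; op_idx linearizes the accumulator
    // fragment index for that position, and odd columns traverse rows in reverse
    // (serpentine order) so consecutive operations reuse the A operand fragments.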
CUTLASS_PRAGMA_UNROLL
for (int outer_col = 0; outer_col < TileIterations::kColumn; ++outer_col) {
CUTLASS_PRAGMA_UNROLL
for (int inner_col = 0; inner_col < MmaIterations::kColumn; ++inner_col) {
CUTLASS_PRAGMA_UNROLL
for (int outer_row = 0; outer_row < TileIterations::kRow; ++outer_row) {
CUTLASS_PRAGMA_UNROLL
for (int inner_row = 0; inner_row < MmaIterations::kRow; ++inner_row) {
int op_col = inner_col + MmaIterations::kColumn * outer_col;
// Column-major serpentine sequence to maximize reuse of A operand.
int inner_row_serp = inner_row;
int outer_row_serp = outer_row;
if (op_col & 1) {
inner_row_serp = MmaIterations::kRow - inner_row - 1;
outer_row_serp = TileIterations::kRow - outer_row - 1;
}
int op_row = inner_row_serp + MmaIterations::kRow * outer_row_serp;
int op_idx = inner_row_serp + MmaIterations::kRow *
(inner_col + MmaIterations::kColumn *
(outer_row_serp + TileIterations::kRow * outer_col));
mma(
ptr_D[op_idx],
ptr_A[op_row],
ptr_B[op_col],
ptr_D[op_idx]);
}
}
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
| 8,966 | C | 30.911032 | 100 | 0.649453 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/mma_gaussian_complex_tensor_op.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/complex.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
#include "cutlass/gemm/warp/mma_gaussian_complex_tensor_op_tile_iterator_sm80.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename RealElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename RealElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename RealElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA = ComplexTransform::kNone,
/// Complex transform on B operand
ComplexTransform TransformB = ComplexTransform::kNone,
/// Do source operands need more than one elements
bool GeneralizedOperatorElements = false,
/// Used for partial specialization
typename Enable = bool
>
class MmaGaussianComplexTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex*complex+complex => complex using real-valued TensorOps
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename RealElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename RealElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename RealElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB
>
class MmaGaussianComplexTensorOp<
Shape_,
complex<RealElementA>,
LayoutA_,
complex<RealElementB>,
LayoutB_,
complex<RealElementC>,
LayoutC_,
Policy_,
TransformA,
TransformB> {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = complex<RealElementA>;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = complex<RealElementB>;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = complex<RealElementC>;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Underlying arch tag
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Indicates math operator
using MathOperator = arch::OpMultiplyAddGaussianComplex;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow,
32,
1
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA = FragmentA;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kColumn,
32,
1
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB = FragmentB;
static_assert(
!(Shape::kM % ArchMmaOperator::Shape::kM) &&
!(Shape::kN % ArchMmaOperator::Shape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<
Shape::kM / ArchMmaOperator::Shape::kM,
Shape::kN / ArchMmaOperator::Shape::kN
>;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpGaussianComplexAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
ElementC,
LayoutC,
typename ArchMmaOperator::Shape,
typename Policy::OpDelta>;
/// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this
/// storage arrangement is to be considered 'gaussian complex' in the sense that the accumulation is
  /// done in three parts, namely part1, part2, and part3. The parts 1, 2, and 3 are stored consecutively
  /// in IteratorC::Fragment. This matches the structure of Tensor Cores, which are always real-valued matrix multiplies.
using FragmentC = typename IteratorC::Fragment;
static_assert(
FragmentC::kElements == 3 * MmaIterations::kCount * ArchMmaOperator::FragmentC::kElements,
"Unexpected gaussian complex fragment length.");
private:
//
// Data members
//
/// Underlying real-valued matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaGaussianComplexTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C
) const {
// Alias types for underlying real-valued matrix multiply operator
using MmaOperandA = typename ArchMmaOperator::FragmentA;
using MmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
static_assert(MmaOperandA::kElements == 1,
"This implementation only supports math instructions in which exactly one element is needed for the A operand."
"We can geneneralize later.");
static_assert(MmaOperandB::kElements == 1,
"This implementation only supports math instructions in which exactly one element is needed for the B operand."
"We can geneneralize later.");
D = C;
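    // Gauss' three-multiplication trick: with
    //   P1 = (a.real + a.imag) * b.real
    //   P2 = -a.real * (b.real - b.imag)
    //   P3 =  a.imag * (b.real + b.imag)
    // the complex product satisfies real = P1 - P3 and imag = P1 + P2, so only three
    // real-valued MMAs are issued per complex multiply. The three partial accumulators
    // are stored as part1/part2/part3 and presumably combined when the accumulator
    // tile is read out through IteratorC.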
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
// mma(accum.part1(), (a.real() + a.imag()), b.real(), accum.part1());
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_Asum;
MmaOperandB operand_Br;
operand_Asum[0] = A[m].real() + ((kTransformA == ComplexTransform::kConjugate) ? -A[m].imag() : +A[m].imag());
operand_Br[0] = B[n].real();
// accumulator part1
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_Asum, operand_Br, *accum);
}
// mma(accum.part2(), -a.real(), (b.real() - b.imag()), accum.part2());
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_Ar;
MmaOperandB operand_Bdiff;
operand_Ar[0] = -A[m].real();
operand_Bdiff[0] = B[n].real() - ((kTransformB == ComplexTransform::kConjugate) ? -B[n].imag() : +B[n].imag());
// accumulator part2
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_Ar, operand_Bdiff, *accum);
}
// mma(accum.part3(), a.imag(), (b.real() + b.imag()), accum.part3())
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_Ai;
MmaOperandB operand_Bsum;
operand_Ai[0] = (kTransformA == ComplexTransform::kConjugate) ? -A[m].imag() : +A[m].imag();
operand_Bsum[0] = B[n].real() + ((kTransformB == ComplexTransform::kConjugate) ? -B[n].imag() : +B[n].imag());
// accumulator part3
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + 2 * MmaIterations::kCount;
mma(*accum, operand_Ai, operand_Bsum, *accum);
}
}
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
//TODO: Implement this
dst_A = A;
dst_B = B;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial specialization for complex*complex+complex => complex using real-valued TensorOps
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
/// Data type of A elements
typename RealElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename RealElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename RealElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Policy describing warp-level MmaTensorOp (concept: MmaTensorOp policy)
typename Policy_,
/// Complex transform on A operand
ComplexTransform TransformA,
/// Complex transform on B operand
ComplexTransform TransformB
>
class MmaGaussianComplexTensorOp<
Shape_,
complex<RealElementA>,
LayoutA_,
complex<RealElementB>,
LayoutB_,
complex<RealElementC>,
LayoutC_,
Policy_,
TransformA,
TransformB,
true> {
public:
/// Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
/// Data type of multiplicand A
using ElementA = complex<RealElementA>;
/// Layout of multiplicand A
using LayoutA = LayoutA_;
/// Data type of multiplicand B
using ElementB = complex<RealElementB>;
/// Layout of multiplicand B
using LayoutB = LayoutB_;
/// Data type of accumulator matrix C
using ElementC = complex<RealElementC>;
/// Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaLanePolicySimt)
using Policy = Policy_;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Shape of underlying instruction
using InstructionShape = typename ArchMmaOperator::Shape;
/// Underlying arch tag
using ArchTag = typename ArchMmaOperator::ArchTag;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassTensorOp;
/// Indicates math operator
using MathOperator = arch::OpMultiplyAddGaussianComplex;
/// Complex transform on A operand
static ComplexTransform const kTransformA = TransformA;
/// Complex transform on B operand
static ComplexTransform const kTransformB = TransformB;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>,
Operand::kA,
ElementA,
LayoutA,
MatrixShape<ArchMmaOperator::Shape::kM, ArchMmaOperator::Shape::kK>,
Policy::OpDelta::kRow,
32,
1
>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Storage for transformed A tile
using TransformedFragmentA = FragmentA;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>,
Operand::kB,
ElementB,
LayoutB,
MatrixShape<ArchMmaOperator::Shape::kK, ArchMmaOperator::Shape::kN>,
Policy::OpDelta::kColumn,
32,
1
>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Storage for transformed B tile
using TransformedFragmentB = FragmentB;
static_assert(
!(Shape::kM % ArchMmaOperator::Shape::kM) &&
!(Shape::kN % ArchMmaOperator::Shape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<
Shape::kM / ArchMmaOperator::Shape::kM,
Shape::kN / ArchMmaOperator::Shape::kN
>;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpGaussianComplexAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>,
ElementC,
LayoutC,
typename ArchMmaOperator::Shape,
typename Policy::OpDelta>;
/// Storage for C tile, the accumulator. Note, regardless of multiplicand type, this
/// storage arrangement is to be considered 'gaussian complex' in the sense that the accumulation is
  /// done in three parts, namely part1, part2, and part3. The parts 1, 2, and 3 are stored consecutively
  /// in IteratorC::Fragment. This matches the structure of Tensor Cores, which are always real-valued matrix multiplies.
using FragmentC = typename IteratorC::Fragment;
static_assert(
FragmentC::kElements == 3 * MmaIterations::kCount * ArchMmaOperator::FragmentC::kElements,
"Unexpected gaussian complex fragment length.");
private:
//
// Data members
//
/// Underlying real-valued matrix multiply operator (concept: arch::Mma)
ArchMmaOperator mma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaGaussianComplexTensorOp() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C
) const {
// Alias types for underlying real-valued matrix multiply operator
using MmaOperandA = typename ArchMmaOperator::FragmentA;
using MmaOperandB = typename ArchMmaOperator::FragmentB;
using MmaOperandC = typename ArchMmaOperator::FragmentC;
D = C;
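    // Same Gauss three-multiplication decomposition as the specialization above;
    // here each arch-level operand may carry several elements, hence the inner
    // mk/nk loops that pack the combined real/imaginary terms element by element.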
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < MmaIterations::kRow; ++m) {
// mma(accum.part1(), (a.real() + a.imag()), b.real(), accum.part1());
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_Asum;
MmaOperandB operand_Br;
CUTLASS_PRAGMA_UNROLL
for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
operand_Asum[mk] = A[m*MmaOperandA::kElements + mk].real() + ((kTransformA == ComplexTransform::kConjugate) ?
-A[m*MmaOperandA::kElements + mk].imag() : +A[m*MmaOperandA::kElements + mk].imag());
CUTLASS_PRAGMA_UNROLL
for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
operand_Br[nk] = B[n*MmaOperandB::kElements + nk].real();
// accumulator part1
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow);
mma(*accum, operand_Asum, operand_Br, *accum);
}
// mma(accum.part2(), -a.real(), (b.real() - b.imag()), accum.part2());
CUTLASS_PRAGMA_UNROLL
for (int n = MmaIterations::kColumn - 1; n >= 0; --n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_Ar;
MmaOperandB operand_Bdiff;
CUTLASS_PRAGMA_UNROLL
for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
operand_Ar[mk] = -A[m*MmaOperandA::kElements + mk].real();
CUTLASS_PRAGMA_UNROLL
for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
operand_Bdiff[nk] = B[n*MmaOperandB::kElements + nk].real() - ((kTransformB == ComplexTransform::kConjugate) ?
-B[n*MmaOperandB::kElements + nk].imag() : +B[n*MmaOperandB::kElements + nk].imag());
// accumulator part2
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + MmaIterations::kCount;
mma(*accum, operand_Ar, operand_Bdiff, *accum);
}
// mma(accum.part3(), a.imag(), (b.real() + b.imag()), accum.part3())
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < MmaIterations::kColumn; ++n) {
// Pack operands together. This may result in actual MOVs
MmaOperandA operand_Ai;
MmaOperandB operand_Bsum;
CUTLASS_PRAGMA_UNROLL
for (int mk = 0; mk < MmaOperandA::kElements; ++mk)
operand_Ai[mk] = (kTransformA == ComplexTransform::kConjugate) ?
-A[m*MmaOperandA::kElements + mk].imag() : +A[m*MmaOperandA::kElements + mk].imag();
CUTLASS_PRAGMA_UNROLL
for (int nk = 0; nk < MmaOperandB::kElements; ++nk)
operand_Bsum[nk] = B[n*MmaOperandB::kElements + nk].real() + ((kTransformB == ComplexTransform::kConjugate) ?
-B[n*MmaOperandB::kElements + nk].imag() : +B[n*MmaOperandB::kElements + nk].imag());
// accumulator part3
MmaOperandC *accum = reinterpret_cast<MmaOperandC *>(&D) +
(m + n * MmaIterations::kRow) + 2 * MmaIterations::kCount;
mma(*accum, operand_Ai, operand_Bsum, *accum);
}
}
}
/// Transform the mma operands to the required types
CUTLASS_DEVICE
void transform(TransformedFragmentA &dst_A, TransformedFragmentB &dst_B,
FragmentA const &A, FragmentB const &B) const {
dst_A = A;
dst_B = B;
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 21,205 | C | 31.928571 | 120 | 0.652063 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/platform/platform.h"
#include "cutlass/fast_math.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for loading 128b vectors of 128b elements.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::TensorOpMultiplicandCongruous128b,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
  static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
static_assert(!(Shape::kContiguous % 8) && !(Shape::kStrided % 4), "Divisibility.");
static_assert(sizeof_bits<Element_>::value == 128, "This is specialized for 128b accesses.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::TensorOpMultiplicandCongruous128b;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
  /// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Load two elements per access
static int const kElementsPerAccess = 1;
/// Policy defining internal details of tile iterator
struct Policy {
/// Shape of one access
using Delta = layout::PitchLinearShape<8, 4>;
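    // The warp's 32 lanes are arranged as 8 lanes along the contiguous dimension and
    // 4 along the strided dimension, with each lane loading one 128b element, so a
    // single collective access covers an 8x4 element footprint.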
/// Number of iterations to load
using Iterations = layout::PitchLinearShape<
Shape::kContiguous / Delta::kContiguous,
InstructionShape::kStrided / Delta::kStrided
>;
};
private:
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Pointer type used for accesses
using AccessType = AlignedArray<Element, kElementsPerAccess, 16>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element, Shape::kContiguous * InstructionShape::kStrided / kThreads>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
):
stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0) {
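    // Decompose the lane index into quad-pair, quad, and lane-in-quad components.
    // The XOR of lane with quad_pair below is intended to follow the swizzling of the
    // TensorOpMultiplicandCongruous128b layout, presumably keeping the warp's 128b
    // shared memory accesses conflict-free.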
int quad_pair = lane_id / 8;
int quad = lane_id / 4;
int lane = lane_id % 4;
int row = (quad & 1) * 4 + (lane ^ quad_pair);
byte_offset_ = (row + quad_pair * stride_) * sizeof(AccessType);
pointer_= reinterpret_cast<AccessType const *>(ref.data());
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
pointer_ += offset;
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
int offset =
(tile_offset.contiguous() * Shape::kContiguous) +
(tile_offset.strided() * InstructionShape::kStrided * stride_);
add_pointer_offset(offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
pointer_ += stride_ * InstructionShape::kStrided;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
AccessType *fetch_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::Iterations::kContiguous; ++c) {
int access_idx = c + s * Policy::Iterations::kContiguous;
AccessType const *source_ptr = pointer_ +
Policy::Delta::kContiguous * c +
Policy::Delta::kStrided * s * stride_;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
AccessType const *source = reinterpret_cast<AccessType const *>(source_byte_ptr);
fetch_ptr[access_idx] = *source;
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset =
tile_offset.contiguous() * Shape::kContiguous +
tile_offset.strided() * InstructionShape::kStrided * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
}
};
////////////////////////////////////////////////////////////////////////////////
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
  static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCongruous128b;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
  /// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element,
layout::TensorOpMultiplicandCongruous128b,
layout::PitchLinearShape<InstructionShape::kColumn,
InstructionShape::kRow>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(layout::PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(layout::PitchLinearCoord(-tile_offset.column(), -tile_offset.row()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.strided(), tile_offset.contiguous()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
  static_assert(kOperand == Operand::kA || kOperand == Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous128b;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
  /// Stride index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element,
layout::TensorOpMultiplicandCongruous128b,
layout::PitchLinearShape<InstructionShape::kRow,
InstructionShape::kColumn>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(layout::PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(layout::PitchLinearCoord(-tile_offset.row(), -tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.contiguous(), tile_offset.strided()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
///
/// Partial specialization for complex<T>
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of underlying field of reals.
typename RealElement,
/// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions, concept: MatrixShape)
typename OpDelta_>
class MmaTensorOpAccumulatorTileIterator<
Shape_, complex<RealElement>, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kC;
/// Element type
using Element = complex<RealElement>;
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
using OpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Stride Index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
static_assert(platform::is_same<TensorCoord, MatrixCoord>::value,
"Layouts must be defined for logical MatrixCoord coordinate space.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<Shape::kRow / InstructionShape::kM,
Shape::kColumn / InstructionShape::kN>;
};
private:
// Assume accumulator tile is an arrangement of 8-by-8 tiles replicated over the entire
// shape, with each quad mapped to one row and each thread mapped to 1/4 of the elements
// of that row. The accumulators within one row are assumed to be consecutive.
static int const kElementsPerAccess = InstructionShape::kN / 4;
static int const kRowsPerTile = 8;
static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile;
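  // Worked example (an assumed instruction shape for illustration only): with
  // InstructionShape = 16x8x8, kElementsPerAccess = 8 / 4 = 2, kRowsPerTile = 8
  // and kAccumulatorRows = 16 / 8 = 2, so each thread owns 2 rows of 2
  // consecutive accumulator elements per MMA tile (4 of the 16x8 = 128
  // elements, shared across 32 threads).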
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile. It is assumed that the accumulators
/// are stored in a planar complex arrangement with the real parts as entirely contiguous
/// followed by the imaginary parts.
using Fragment = Array<RealElement, Shape::kCount / kThreads * 2>;
static int const kRealIndex = 0;
static int const kImaginaryIndex = Shape::kCount / kThreads;
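  // Sketch of the planar-complex convention above (illustrative only; 'i' is a
  // thread-local element index produced by load()/store() below):
  //
  //   RealElement re = frag[kRealIndex + i];
  //   RealElement im = frag[kImaginaryIndex + i];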
private:
/// Reference to output tensor
TensorRef ref_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator(
TensorRef const &ref,
int lane_id
):
ref_(ref) {
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess);
ref_.add_coord_offset(lane_offset);
}
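  // Example of the lane mapping above (illustration only, assuming
  // kElementsPerAccess == 2): lane 13 has quad == 3 and lane_in_quad == 1, so
  // it starts at row 3, column 2 of its 8x8 accumulator tile.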
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn));
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator++() {
// deliberate no-op
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpAccumulatorTileIterator & operator--() {
// deliberate no-op
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
Fragment &frag, ///< fragment to load from the tensor
Index pointer_offset) const { ///< loads a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile;
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
Element z = offset_ref.at({accum_m, accum_n});
frag[mma_accum_start + row * kElementsPerAccess + col + kRealIndex] = z.real();
frag[mma_accum_start + row * kElementsPerAccess + col + kImaginaryIndex] = z.imag();
}
}
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
Fragment &frag, ///< fragment to load from the tensor
Index byte_offset) const { ///< loads a tile with a linear offset
    load_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles
load(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles
Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset
load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
/// Stores a fragment to memory
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag, ///< fragment to store from the tensor
Index pointer_offset) const { ///< store a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile;
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
int idx = mma_accum_start + row * kElementsPerAccess + col;
Element z(frag[kRealIndex + idx], frag[kImaginaryIndex + idx]);
offset_ref.at({accum_m, accum_n}) = z;
}
}
}
}
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_byte_offset(
Fragment const &frag, ///< fragment to store from the tensor
Index byte_offset) const { ///< store a tile with a linear offset
    store_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
    Fragment const &frag,                   ///< fragment to store to the tensor
TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles
store(frag, tile_offset, 0);
}
  /// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
/// fragment to store to the tensor
Fragment const &frag,
/// stores a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// stores a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
};
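//
// Usage sketch for the accumulator iterator above. Illustrative only: the
// shapes, 'ref_C', 'lane_id', 'warp_row' and 'warp_col' below are assumptions
// made for this example, not definitions provided by this header.
//
//   using AccumIterator = MmaTensorOpAccumulatorTileIterator<
//       MatrixShape<64, 64>, complex<float>, layout::RowMajor,
//       GemmShape<16, 8, 8>, MatrixShape<1, 1>>;
//
//   AccumIterator accum_it(ref_C, lane_id);
//   accum_it.add_tile_offset({warp_row, warp_col});
//
//   AccumIterator::Fragment frag;
//   accum_it.load(frag);   // real parts first, then imaginary parts
//   accum_it.store(frag);
//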
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for loading 128b vectors of 128b elements.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::TensorOpMultiplicandCrosswise128x4,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
static_assert(!(Shape::kContiguous % 4) && !(Shape::kStrided % 8), "Divisibility.");
static_assert(sizeof_bits<Element_>::value == 128, "This is specialized for 128b accesses.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::TensorOpMultiplicandCrosswise128x4;
/// Shape of one matrix product operation (concept: PitchLinearShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Stride Index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Load one element per access
static int const kElementsPerAccess = 1;
/// Policy defining internal details of tile iterator
struct Policy {
/// Shape of one access
using Delta = layout::PitchLinearShape<4, 8>;
/// Number of iterations to load
using Iterations = layout::PitchLinearShape<
InstructionShape::kContiguous / Delta::kContiguous,
Shape::kStrided / Delta::kStrided
>;
};
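  // Worked example of the policy above (assumed shapes, for illustration
  // only): with InstructionShape::kContiguous == 4 and Shape::kStrided == 16,
  // Iterations = <4 / 4, 16 / 8> = <1, 2>, i.e. two 128b accesses per load().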
private:
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Pointer type used for accesses
using AccessType = AlignedArray<Element, kElementsPerAccess, 16>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element, Shape::kStrided * InstructionShape::kContiguous / kThreads>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
):
stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0) {
int quad = lane_id / 4;
int liq = lane_id % 4;
int c = liq + (quad & 1) * 4;
int s = (quad / 2);
byte_offset_ = (c + s * stride_) * sizeof(AccessType);
pointer_= reinterpret_cast<AccessType const *>(ref.data());
}
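  // Example of the lane mapping above (illustration only): lane 13 has
  // quad == 3 and liq == 1, giving c == 1 + 1 * 4 == 5 and s == 1, so its
  // initial byte offset is (5 + stride_) * sizeof(AccessType).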
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
pointer_ += offset;
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
// Compute the offset in units of elements. Note, the external coordinate system is
// approximately transposed with respect to the tiled internal structure
int offset =
(tile_offset.contiguous() * InstructionShape::kContiguous) * stride_ +
(tile_offset.strided() * Shape::kStrided);
add_pointer_offset(offset);
byte_offset_ ^= (tile_offset.contiguous() & 1) * 4 * sizeof(AccessType);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
pointer_ += stride_ * InstructionShape::kContiguous;
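    // Toggle between the two 4-access (64-byte) halves of the crosswise
    // permutation, mirroring the XOR applied in add_tile_offset() above.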
byte_offset_ ^= 4 * sizeof(AccessType);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
AccessType *fetch_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::Iterations::kStrided; ++s) {
int access_idx = s + c * Policy::Iterations::kStrided;
AccessType const *source_ptr = pointer_ +
Policy::Delta::kContiguous * c * stride_ +
Policy::Delta::kStrided * s;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
AccessType const *source = reinterpret_cast<AccessType const *>(source_byte_ptr);
fetch_ptr[access_idx] = *source;
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset =
tile_offset.contiguous() * InstructionShape::kContiguous * stride_ +
tile_offset.strided() * Shape::kStrided;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
}
};
////////////////////////////////////////////////////////////////////////////////
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajorTensorOpMultiplicandCrosswise128x4;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Stride Index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, kOperand, Element,
layout::TensorOpMultiplicandCrosswise128x4,
layout::PitchLinearShape<InstructionShape::kColumn,
InstructionShape::kRow>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(layout::PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(layout::PitchLinearCoord(-tile_offset.column(), -tile_offset.row()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.strided(), tile_offset.contiguous()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, Element_,
cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::ColumnMajorTensorOpMultiplicandCrosswise128x4;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Stride Index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Underlying tile iterator implementation
using Base = MmaTensorOpMultiplicandTileIterator<
layout::PitchLinearShape<Shape::kRow, Shape::kColumn>, kOperand, Element,
layout::TensorOpMultiplicandCrosswise128x4,
layout::PitchLinearShape<InstructionShape::kRow,
InstructionShape::kColumn>,
kOpDelta, kThreads, PartitionsK_>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
): iterator_({ref.data(), ref.stride()}, lane_id) {
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(layout::PitchLinearCoord(tile_offset.row(), tile_offset.column()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(layout::PitchLinearCoord(-tile_offset.row(), -tile_offset.column()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
iterator_.load(frag);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
// TODO
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag,
{tile_offset.contiguous(), tile_offset.strided()},
byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
// Congruous shared memory layout
// Warp-level iterators for complex<float>*complex<float> + complex<float> => complex<float>
// The underlying iterators are similar to that for MMA f64*f64 + f64 = f64
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for loading 128b vectors of 64b elements.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, cutlass::complex<float>,
cutlass::layout::TensorOpMultiplicandCongruous64b,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
static_assert(!(Shape::kContiguous % 16) && !(Shape::kStrided % 8), "Divisibility.");
/// Element type
using Element = cutlass::complex<float>;
/// Layout of source tile
using Layout = cutlass::layout::TensorOpMultiplicandCongruous64b;
/// Shape of one matrix product operation (concept: PitchLinearShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Stride Index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Load two elements per access
static int const kElementsPerAccess = 2;
/// Policy defining internal details of tile iterator
struct Policy {
/// Shape of one access
using Delta = layout::PitchLinearShape<8, 4>;
/// Number of iterations to load
using Iterations = layout::PitchLinearShape<
Shape::kContiguous / kElementsPerAccess / Delta::kContiguous,
InstructionShape::kStrided / Delta::kStrided
>;
};
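  // Worked example of the policy above (assumed shapes, for illustration
  // only): with Shape::kContiguous == 32, kElementsPerAccess == 2 and
  // InstructionShape::kStrided == 8, Iterations = <32 / 2 / 8, 8 / 4> = <2, 2>,
  // i.e. four 128b accesses per load().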
private:
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Pointer type used for accesses
using AccessType = AlignedArray<Element, kElementsPerAccess, 16>;
/// Internal counter used to jump to next K partition
int k_group_idx_;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element, Shape::kContiguous * InstructionShape::kStrided / kThreads>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
  MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0), k_group_idx_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
):
stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0),
k_group_idx_(0) {
int access_strided = lane_id / Policy::Delta::kContiguous;
int access_contiguous = (lane_id % Policy::Delta::kContiguous) ^ access_strided;
pointer_= reinterpret_cast<AccessType const *>(ref.data()) +
access_contiguous + access_strided * stride_;
}
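  // Example of the swizzle above (illustration only): with
  // Policy::Delta::kContiguous == 8, lane 13 has access_strided == 1 and
  // access_contiguous == 5 ^ 1 == 4, so it starts four accesses into its row;
  // the XOR is the usual shared-memory swizzle used to avoid bank conflicts.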
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += offset * sizeof(Element);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
int offset =
(tile_offset.strided() * InstructionShape::kStrided) * stride_ * kElementsPerAccess +
tile_offset.contiguous() * Shape::kContiguous;
add_pointer_offset(offset);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
add_tile_offset({0, 1});
return *this;
}
/// Advances the iterator along the opposite of the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpMultiplicandTileIterator & operator--() {
add_tile_offset({0, -1});
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
AccessType *fetch_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::Iterations::kStrided; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::Iterations::kContiguous; ++c) {
int access_idx = c + s * Policy::Iterations::kContiguous;
AccessType const *source_ptr = pointer_ +
Policy::Delta::kContiguous * c +
Policy::Delta::kStrided * s * stride_;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
AccessType const *source = reinterpret_cast<AccessType const *>(source_byte_ptr);
fetch_ptr[access_idx] = *source;
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset =
tile_offset.contiguous() * Shape::kContiguous / Layout::kElementsPerAccess +
tile_offset.strided() * InstructionShape::kStrided * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
}
};
////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
// Crosswise shared memory layout
// Warp-level iterators for complex<float>*complex<float> + complex<float> => complex<float>
// The underlying iterators are similar to that for f64*f64 + f64 = f64
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for loading 128b vectors of 64b elements.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Identifies A or B multiplicand
Operand Operand_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions)
int OpDelta_,
/// Number of partitions along K dimension
int PartitionsK_>
class MmaTensorOpMultiplicandTileIterator<
Shape_, Operand_, complex<float>,
cutlass::layout::TensorOpMultiplicand64bCrosswise,
InstructionShape_, OpDelta_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand_;
static_assert(kOperand == Operand::kA || kOperand== Operand::kB,
"MmaTensorOpMultiplicandIterator may only be instantiated for A or B operands to warp-level Mma.");
static_assert(!(Shape::kContiguous % 4) && !(Shape::kStrided % 16), "Divisibility.");
static_assert(sizeof_bits<complex<float>>::value == 64, "This is specialized for 64b accesses.");
/// Element type
using Element = complex<float>;
/// Layout of source tile
using Layout = cutlass::layout::TensorOpMultiplicand64bCrosswise;
/// Shape of one matrix product operation (concept: PitchLinearShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
static int const kOpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Stride Index type
using StrideIndex = typename TensorRef::Layout::Stride::Index;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Load two elements per access
static int const kElementsPerAccess = 2;
/// Policy defining internal details of tile iterator
struct Policy {
/// Shape of one access
using Delta = layout::PitchLinearShape<4, 16>;
/// Number of iterations to load
using Iterations = layout::PitchLinearShape<
InstructionShape::kContiguous / Delta::kContiguous,
Shape::kStrided / Delta::kStrided
>;
};
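  // Worked example of the policy above (assumed shapes, for illustration
  // only): with InstructionShape::kContiguous == 8 and Shape::kStrided == 32,
  // Iterations = <8 / 4, 32 / 16> = <2, 2>, i.e. four 128b accesses per load().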
private:
/// Not working on this feature at the moment.
static_assert(kOpDelta == 1,
"Alternative arrangements not supported at present.");
/// Pointer type used for accesses
using AccessType = AlignedArray<Element, kElementsPerAccess, 16>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment =
Array<Element, Shape::kStrided * InstructionShape::kContiguous / kThreads>;
private:
/// Layout object storing stride values
StrideIndex stride_;
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
/// Internal counter for tracking K-group
Index k_group_idx_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
  MmaTensorOpMultiplicandTileIterator(): stride_(0), byte_offset_(0), k_group_idx_(0) { }
/// Constructor from TensorRef
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator(
TensorRef const &ref,
int lane_id
):
stride_(ref.stride(0) / kElementsPerAccess), byte_offset_(0),
k_group_idx_(0) {
int access_strided = lane_id / 8;
int access_contiguous = (lane_id % 8);
byte_offset_ = (access_contiguous + access_strided * stride_) * sizeof(AccessType);
pointer_= reinterpret_cast<AccessType const *>(ref.data());
}
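  // Example of the lane mapping above (illustration only): lane 13 has
  // access_strided == 1 and access_contiguous == 5, so its starting byte
  // offset is (5 + stride_) * sizeof(AccessType); operator++ later advances
  // pointer_ and XORs byte_offset_ to step through k-groups.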
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_pointer_offset(LongIndex offset) {
pointer_ += offset / kElementsPerAccess;
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
int offset = (tile_offset.contiguous() * InstructionShape::kContiguous) *
stride_ * kElementsPerAccess +
tile_offset.strided() * Shape::kStrided;
add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator &add_tile_offset_negative(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
if (k_group_idx_ & 1)
byte_offset_ ^= 0x40;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator++() {
pointer_ += stride_ * InstructionShape::kContiguous;
    // XOR the byte offset to toggle between the two 64-byte halves of the crosswise arrangement
byte_offset_ ^= 0x40;
++k_group_idx_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpMultiplicandTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_byte_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
AccessType *fetch_ptr = reinterpret_cast<AccessType *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::Iterations::kContiguous; ++c) {
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < Policy::Iterations::kStrided; ++s) {
int access_idx = c * Policy::Iterations::kStrided + s;
AccessType const *source_ptr = pointer_ +
Policy::Delta::kContiguous * c * stride_ +
Policy::Delta::kStrided * s / kElementsPerAccess;
char const *source_byte_ptr = reinterpret_cast<char const *>(source_ptr) + byte_offset + byte_offset_;
AccessType const *source = reinterpret_cast<AccessType const *>(source_byte_ptr);
fetch_ptr[access_idx] = *source;
}
}
Element *exchange_ptr = reinterpret_cast<Element *>(&frag);
    // exchange adjacent 64b elements, but only for the second half of the fragment (k = 8/2 to k = 8)
CUTLASS_PRAGMA_UNROLL
for (int i = Fragment::kElements/2; i < Fragment::kElements; i += 2) {
Element tmp = exchange_ptr[i];
exchange_ptr[i] = exchange_ptr[i + 1];
exchange_ptr[i + 1] = tmp;
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset = tile_offset.contiguous() *
InstructionShape::kContiguous /
Layout::kElementsPerAccess +
tile_offset.strided() * Shape::kStrided * stride_;
byte_offset += sizeof(AccessType) * pointer_offset;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
k_group_idx_ = k_group;
}
};
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 78,615 | C | 30.522053 | 112 | 0.674782 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/default_mma_sparse_tensor_op.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Default warp-level GEMM operators selected by data type, size, and layouts of operands.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/gemm/warp/mma_sparse_tensor_op.h"
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the Gemm problem - concept: gemm::GemmShape<>
typename WarpShape_,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A elements
typename ElementA_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
/// Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
/// Element type of C matrix
typename ElementC_,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
/// Operator describing the tensor operation
typename Operator_ = arch::OpMultiplyAdd,
/// Number of partitions along K dimension
int PartitionsK = 1,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor = false
>
struct DefaultSparseMmaTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Partial Specialization - inputs and output types are float - uses TF32 internally
template <
/// Shape of one matrix production operation (concept: GemmShape)
typename WarpShape_,
/// Shape of target matrix multiply instruction (concept: GemmShape)
typename InstructionShape_,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Number of partitions along K dimension
int PartitionsK,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor>
struct DefaultSparseMmaTensorOp<
WarpShape_,
InstructionShape_,
float, LayoutA,
float, LayoutB,
float, LayoutC,
arch::OpMultiplyAdd, PartitionsK, AccumulatorsInRowMajor> {
// Uses TF32 internally
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::SparseMma<
InstructionShape_,
32,
tfloat32_t, cutlass::layout::RowMajor,
tfloat32_t, cutlass::layout::ColumnMajor,
float, cutlass::layout::RowMajor,
arch::OpMultiplyAdd
>,
cutlass::MatrixShape<1, 1> >;
// Define the warp-level tensor op
using Type = cutlass::gemm::warp::SparseMmaTensorOp<
WarpShape_, float, LayoutA, float, LayoutB, float, LayoutC,
Policy, PartitionsK, AccumulatorsInRowMajor>;
};
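//
// Usage sketch for the specialization above. Illustrative only: the warp and
// instruction shapes below are assumptions made for this example, not values
// mandated by this header.
//
//   using WarpMma = typename cutlass::gemm::warp::DefaultSparseMmaTensorOp<
//       cutlass::gemm::GemmShape<64, 64, 64>,   // warp-level tile
//       cutlass::gemm::GemmShape<16, 8, 16>,    // sparse tensor op shape
//       float, cutlass::layout::RowMajor,       // A
//       float, cutlass::layout::ColumnMajor,    // B
//       float, cutlass::layout::RowMajor        // C
//   >::Type;
//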
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Definition for all remaining data types (primary template)
template <
/// Shape of one matrix production operation (concept: GemmShape)
typename WarpShape_,
/// Shape of one matrix production operation (concept: GemmShape)
typename InstructionShape_,
/// Data type of A elements
typename ElementA,
/// Layout of A matrix (concept: MatrixLayout)
typename LayoutA,
/// Data type of B elements
typename ElementB,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB,
/// Element type of C matrix
typename ElementC,
/// Layout of C matrix (concept: MatrixLayout)
typename LayoutC,
/// Operator describing the tensor operation
typename Operator_,
/// Number of partitions along K dimension
int PartitionsK,
/// Store the accumulators in row major or column major. Row major is used
/// when output layout is interleaved.
bool AccumulatorsInRowMajor>
struct DefaultSparseMmaTensorOp {
using Policy = cutlass::gemm::warp::MmaTensorOpPolicy<
cutlass::arch::SparseMma<InstructionShape_, 32, ElementA,
cutlass::layout::RowMajor, ElementB,
cutlass::layout::ColumnMajor, ElementC,
cutlass::layout::RowMajor, Operator_>,
cutlass::MatrixShape<1, 1> >;
// Define the warp-level tensor op
using Type = cutlass::gemm::warp::SparseMmaTensorOp<
WarpShape_, ElementA, LayoutA, ElementB, LayoutB, ElementC, LayoutC,
Policy, PartitionsK, AccumulatorsInRowMajor>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 6,684 | C | 39.271084 | 100 | 0.634351 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/mma_gaussian_complex_tensor_op_tile_iterator_sm80.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Defines iterators used by warp-level matrix multiply operations targeting Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm80.h"
#include "cutlass/gemm/warp/mma_complex_tensor_op_tile_iterator_sm80.h"
#include "cutlass/platform/platform.h"
#include "cutlass/fast_math.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Element type
typename Element_,
/// Layout of operand in memory
typename Layout_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions, concept: MatrixShape)
typename OpDelta_>
class MmaTensorOpGaussianComplexAccumulatorTileIterator;
////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
///
/// Partial specialization for complex<T>
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of underlying field of reals.
typename RealElement,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Interval between adjacent *MMA instructions (in units of MMA
/// instructions, concept: MatrixShape)
typename OpDelta_>
class MmaTensorOpGaussianComplexAccumulatorTileIterator<
Shape_, complex<RealElement>, cutlass::layout::RowMajor, InstructionShape_, OpDelta_> {
public:
/// Shape of tile to load (concept: MatrixShape)
using Shape = Shape_;
/// Operand tag
static Operand const kOperand = Operand::kC;
/// Element type
using Element = complex<RealElement>;
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Delta between *MMA operations (in units of *MMA operations, concept: MatrixShape)
using OpDelta = OpDelta_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
struct Policy {
static_assert(
!(Shape::kRow % InstructionShape::kM) &&
!(Shape::kColumn % InstructionShape::kN),
"Shape of warp-level Mma must be divisible by operator shape.");
static_assert(platform::is_same<TensorCoord, MatrixCoord>::value,
"Layouts must be defined for logical MatrixCoord coordinate space.");
/// Number of mma operations performed
using MmaIterations = MatrixShape<Shape::kRow / InstructionShape::kM,
Shape::kColumn / InstructionShape::kN>;
};
private:
// Assume accumulator tile is an arrangement of 8-by-8 tiles replicated over the entire
// shape, with each quad mapped to one row and each thread mapped to 1/4 of the elements
// of that row. The accumulators within one row are assumed to be consecutive.
static int const kElementsPerAccess = InstructionShape::kN / 4;
static int const kRowsPerTile = 8;
static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile;
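  // For example, with an assumed 16-by-8 instruction shape (InstructionShape::kM == 16,
  // InstructionShape::kN == 8): kElementsPerAccess == 2, kRowsPerTile == 8, and
  // kAccumulatorRows == 2, so each thread owns a 2x2 patch of every 16x8 accumulator tile.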
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile. It is assumed that the accumulators
  /// are stored in a Gaussian complex arrangement, with parts 1, 2, and 3 stored
  /// contiguously as [part1, part2, part3].
using Fragment = Array<RealElement, (Shape::kCount / kThreads) * 3>;
static int const kPart1Index = (Shape::kCount / kThreads) * 0;
static int const kPart2Index = (Shape::kCount / kThreads) * 1;
static int const kPart3Index = (Shape::kCount / kThreads) * 2;
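  // For example, a 64x64 warp tile gives Shape::kCount / kThreads == 128 scalars per
  // part, so the fragment holds 384 RealElement values with kPart1Index, kPart2Index,
  // and kPart3Index equal to 0, 128, and 256 respectively.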
private:
/// Reference to output tensor
TensorRef ref_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
MmaTensorOpGaussianComplexAccumulatorTileIterator() { }
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
MmaTensorOpGaussianComplexAccumulatorTileIterator(
TensorRef const &ref,
int lane_id
):
ref_(ref) {
int quad = (lane_id >> 2);
int lane_in_quad = (lane_id & 3);
MatrixCoord lane_offset(quad, lane_in_quad * kElementsPerAccess);
ref_.add_coord_offset(lane_offset);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
MmaTensorOpGaussianComplexAccumulatorTileIterator &add_pointer_offset(LongIndex offset) {
ref_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole tiles
CUTLASS_HOST_DEVICE
MmaTensorOpGaussianComplexAccumulatorTileIterator &add_tile_offset(TensorCoord const &tile_offset) {
ref_.add_coord_offset(tile_offset * make_Coord(Shape::kRow, Shape::kColumn));
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpGaussianComplexAccumulatorTileIterator & operator++() {
// deliberate no-op
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
MmaTensorOpGaussianComplexAccumulatorTileIterator & operator--() {
// deliberate no-op
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpGaussianComplexAccumulatorTileIterator & operator+=(TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of the tensor
CUTLASS_DEVICE
MmaTensorOpGaussianComplexAccumulatorTileIterator & operator-=(TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const {
load_with_pointer_offset(frag, 0);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
Fragment &frag, ///< fragment to load from the tensor
Index pointer_offset) const { ///< loads a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile;
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
Element z = offset_ref.at({accum_m, accum_n});
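          // Gaussian (three-multiplier) complex storage: part1 = re + im,
          // part2 = -re, part3 = im. store_with_pointer_offset() below inverts
          // this mapping when writing the accumulators back.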
frag[mma_accum_start + row * kElementsPerAccess + col + kPart1Index] = z.real() + z.imag();
frag[mma_accum_start + row * kElementsPerAccess + col + kPart2Index] = -z.real();
frag[mma_accum_start + row * kElementsPerAccess + col + kPart3Index] = z.imag();
}
}
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
Fragment &frag, ///< fragment to load from the tensor
Index byte_offset) const { ///< loads a tile with a linear offset
    load_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset) const { ///< loads a tile with a logical offset in units of whole tiles
load(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
Fragment &frag, ///< fragment to load from the tensor
TensorCoord const &tile_offset, ///< loads a tile with a logical offset in units of whole tiles
Index pointer_offset) const { ///< loads a tile with a logical offset AND a pointer offset
load_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
/// Stores a fragment to memory
CUTLASS_HOST_DEVICE
void store(Fragment const &frag) const {
store_with_pointer_offset(frag, 0);
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_pointer_offset(
Fragment const &frag, ///< fragment to store from the tensor
Index pointer_offset) const { ///< store a tile with a linear offset
TensorRef offset_ref(ref_);
offset_ref.add_pointer_offset(pointer_offset);
CUTLASS_PRAGMA_UNROLL
for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
CUTLASS_PRAGMA_UNROLL
for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
(mma_n * Policy::MmaIterations::kRow + mma_m);
CUTLASS_PRAGMA_UNROLL
for (int row = 0; row < kAccumulatorRows; ++row) {
CUTLASS_PRAGMA_UNROLL
for (int col = 0; col < kElementsPerAccess; ++col) {
int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow +
row * kRowsPerTile;
int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col;
int idx = mma_accum_start + row * kElementsPerAccess + col;
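          // Recover the complex value from the three Gaussian parts:
          //   real = part1 - part3 = (re + im) - im = re
          //   imag = part1 + part2 = (re + im) - re = im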
Element z(frag[kPart1Index + idx] - frag[kPart3Index + idx],
frag[kPart1Index + idx] + frag[kPart2Index + idx]);
offset_ref.at({accum_m, accum_n}) = z;
}
}
}
}
}
/// Stores a fragment to memory with additional pointer offset
CUTLASS_DEVICE
void store_with_byte_offset(
Fragment const &frag, ///< fragment to store from the tensor
Index byte_offset) const { ///< store a tile with a linear offset
    store_with_pointer_offset(frag, byte_offset / sizeof(Element));
}
/// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
    Fragment const &frag,                 ///< fragment to store to the tensor
TensorCoord const &tile_offset) const { ///< stores a tile with a logical offset in units of whole tiles
store(frag, tile_offset, 0);
}
  /// Stores a fragment to memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void store(
/// fragment to store to the tensor
Fragment const &frag,
/// stores a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// stores a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
store_with_pointer_offset(frag, ref_.offset(tile_offset) + pointer_offset);
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 14,589 | C | 36.314578 | 112 | 0.639043 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/mma_tensor_op_wmma.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Templates implementing warp-level matrix multiply-accumulate operations targeting
Tensor Cores.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/arch/wmma.h"
#if defined(CUTLASS_ARCH_WMMA_ENABLED)
#include "cutlass/wmma_array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/arch/mma_sm75.h"
#include "cutlass/arch/mma_sm80.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/gemm/warp/mma.h"
#include "cutlass/gemm/warp/mma_tensor_op_policy.h"
#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_wmma.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
/////////////////////////////////////////////////////////////////////////////////////////////////
///< Structure to compute the matrix product targeting Tensor Cores via the WMMA API.
template <
///< Size of the Gemm problem - concept: gemm::GemmShape<>
typename Shape_,
///< Data type of A elements
typename ElementA_,
///< Layout of A matrix (concept: MatrixLayout)
typename LayoutA_,
///< Data type of B elements
typename ElementB_,
/// Layout of B matrix (concept: MatrixLayout)
typename LayoutB_,
///< Element type of C matrix
typename ElementC_,
///< Layout of C matrix (concept: MatrixLayout)
typename LayoutC_,
///< Policy describing warp-level Wmma operation (concept: MmaTensorOpPolicy)
typename Policy_,
///< Number of partitions along K dimension
int PartitionsK_ = 1,
///< Used for partial specialization
typename Enable = bool
>
class MmaTensorOpWmma {
public:
///< Shape of warp-level matrix operation (concept: GemmShape)
using Shape = Shape_;
///< Data type of multiplicand A
using ElementA = ElementA_;
///< Layout of multiplicand A
using LayoutA = LayoutA_;
///< Data type of multiplicand B
using ElementB = ElementB_;
///< Layout of multiplicand B
using LayoutB = LayoutB_;
///< Data type of accumulator matrix C
using ElementC = ElementC_;
///< Layout of accumulator matrix C
using LayoutC = LayoutC_;
/// Shape of the warp in units of thread (concept: MmaTensorOpPolicy)
using Policy = Policy_;
/// Underlying instruction shape
using InstructionShape = typename Policy::Operator::Shape;
/// Underlying matrix multiply operator (concept: arch::Mma)
using ArchMmaOperator = typename Policy::Operator;
/// Indicates math operator
using MathOperator = typename ArchMmaOperator::Operator;
/// Underlying architecture tag
using ArchTag = typename Policy::Operator::ArchTag;
/// Complex transform on A operand
static ComplexTransform const kTransformA = ComplexTransform::kNone;
/// Complex transform on B operand
static ComplexTransform const kTransformB = ComplexTransform::kNone;
/// Indicates class of matrix operator
using OperatorClass = arch::OpClassWmmaTensorOp;
/// Number of threads participating in warp-level matrix product
static int const kThreadCount = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
public:
/// Iterates over the A operand in memory
using IteratorA = MmaTensorOpWmmaMultiplicandTileIterator<
MatrixShape<Shape::kM, Shape::kK>, Operand::kA, ElementA, LayoutA,
Policy::OpDelta::kRow, kThreadCount, Policy>;
/// Storage for A tile
using FragmentA = typename IteratorA::Fragment;
/// Iterates over the B operand in memory
using IteratorB = MmaTensorOpWmmaMultiplicandTileIterator<
MatrixShape<Shape::kK, Shape::kN>, Operand::kB, ElementB, LayoutB,
Policy::OpDelta::kRow, kThreadCount, Policy>;
/// Storage for B tile
using FragmentB = typename IteratorB::Fragment;
/// Iterates over the C operand in memory
using IteratorC = MmaTensorOpWmmaAccumulatorTileIterator<
MatrixShape<Shape::kM, Shape::kN>, ElementC, LayoutC,
typename Policy::OpDelta, Policy>;
/// Storage for C tile
using FragmentC = typename IteratorC::Fragment;
private:
static_assert(
!(Shape::kM % Policy::Operator::Shape::kM) &&
!(Shape::kN % Policy::Operator::Shape::kN),
"Shape of warp-level Wmma must be divisible by operator shape (wmma native size)");
/// Number of wmma operations performed
using WmmaIterations = MatrixShape<
Shape::kM / Policy::Operator::Shape::kM,
Shape::kN / Policy::Operator::Shape::kN
>;
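  // For example, a 64x64 warp tile with a 16x16 native wmma shape yields
  // WmmaIterations of 4x4, i.e. 16 underlying wmma operations per warp-level
  // multiply-accumulate.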
public:
/// Underlying matrix multiply operator (concept: cutlass::arch::Wmma)
typename Policy::Operator wmma;
public:
//
// Methods
//
/// Ctor
CUTLASS_DEVICE
MmaTensorOpWmma() {}
/// Performs a warp-level matrix multiply-accumulate operation
CUTLASS_DEVICE
void operator()(
FragmentC &D,
FragmentA const &A,
FragmentB const &B,
FragmentC const &C) const {
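    // Tile the warp-level product into native wmma operations. Fragments are
    // indexed row-major as m * WmmaIterations::kColumn + n, and each step
    // computes D[m][n] = A[m] * B[n] + C[m][n] through the underlying operator.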
CUTLASS_PRAGMA_UNROLL
for (int n = 0; n < WmmaIterations::kColumn; ++n) {
CUTLASS_PRAGMA_UNROLL
for (int m = 0; m < WmmaIterations::kRow; ++m) {
// accumulate wmma mma
wmma(D[m * WmmaIterations::kColumn + n], A[m], B[n], C[m * WmmaIterations::kColumn + n]);
}
}
}
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
#endif // if defined(CUTLASS_ARCH_WMMA_ENABLED)
| 7,241 | C | 31.330357 | 100 | 0.670764 |
NVIDIA/warp/warp/native/cutlass/include/cutlass/gemm/warp/scale_bias_tile_iterator.h | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
    \brief Defines iterators used for warp-level loading of scale and bias vectors.
           Each scale/bias value only needs to be loaded once per channel.
*/
#pragma once
#include "cutlass/cutlass.h"
#include "cutlass/array.h"
#include "cutlass/numeric_types.h"
#include "cutlass/tensor_ref.h"
#include "cutlass/matrix_shape.h"
#include "cutlass/arch/memory_sm75.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/layout/pitch_linear.h"
#include "cutlass/layout/tensor_op_multiplicand_sm75.h"
#include "cutlass/platform/platform.h"
#include "cutlass/fast_math.h"
////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace gemm {
namespace warp {
////////////////////////////////////////////////////////////////////////////////
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of A elements
typename Element_,
/// Layout of operand
typename Layout_,
  /// Shape of one matrix product operation (concept: GemmShape)
typename InstructionShape_,
/// Policy of the details of LDSM shape and iterations
typename Policy_,
/// Number of threads participating in one matrix operation
int Threads,
/// Number of partitions along K dimension
int PartitionsK_ = 1>
class ScaleBiasTileIterator;
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to
/// load from shared memory and therefore must be initialized with a TensorRef
/// to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: PitchLinearShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: PitchLinearShape)
typename InstructionShape_,
/// Policy of the details of LDSM shape and iterations
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK_>
class ScaleBiasTileIterator<Shape_, Element_, cutlass::layout::PitchLinear,
InstructionShape_, Policy_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::PitchLinear;
/// Shape of one matrix product operation (concept: GemmShape)
using InstructionShape = InstructionShape_;
/// Number of participating threads
static int const kThreads = 32;
/// Number of partitions along K dimension
static int const kPartitionsK = PartitionsK_;
  /// Number of elements per 128-bit access
static int const kElementsPerAccess = 128 / sizeof_bits<Element>::value;
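  // For example, 16-bit elements (e.g. half_t) give kElementsPerAccess == 8,
  // i.e. one 128-bit vector per access.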
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
using Policy = Policy_;
private:
/// Pointer type used for accesses
using AccessType = Array<Element, kElementsPerAccess>;
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = Array<Element, 2 * Policy::kLdsmOpInner *
InstructionShape::kContiguous / kThreads>;
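  // The leading factor of 2 presumably accounts for the paired scale and bias
  // values, which are stored back to back for each stage in shared memory
  // (see the comment in add_tile_offset below).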
private:
/// Shared memory base pointers - not advanced
AccessType const *pointer_;
/// Byte offset incremented as iterator advances
Index byte_offset_;
/// Internal counter used to determine when to increment byte offset and when
/// to XOR it
int k_group_idx_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
ScaleBiasTileIterator()
: pointer_(nullptr),
byte_offset_(0),
k_group_idx_(0) {}
/// Constructor from TensorRef
CUTLASS_DEVICE
ScaleBiasTileIterator(TensorRef const &ref_scale_bias,
int lane_id)
: byte_offset_(0), k_group_idx_(0) {
/// 16816 only
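    // Lane mapping (specific to the 16x8x16 tensor op): bit 3 of the lane id
    // selects one of two Shape::kContiguous-wide halves and bit 4 adds one
    // further 128-bit access along the contiguous dimension.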
pointer_ = reinterpret_cast<AccessType const *>(ref_scale_bias.data()) +
((lane_id >> 3) & 1) * Shape::kContiguous / kElementsPerAccess +
(lane_id >> 4);
}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_DEVICE
ScaleBiasTileIterator &add_pointer_offset(LongIndex offset) {
byte_offset_ += offset * sizeof_bits<Element>::value / 8;
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_DEVICE
ScaleBiasTileIterator &add_tile_offset(
TensorCoord const &tile_offset) {
int whole_tiles = tile_offset.contiguous() / Policy::kGroupsPerTile;
int k_groups_delta = tile_offset.contiguous() % Policy::kGroupsPerTile;
byte_offset_ += k_groups_delta * sizeof_bits<Element>::value *
kElementsPerAccess * Policy::LdsmShape::kContiguous / 8;
// Multiply by 2 because scale and bias belonging to the same stage are next
// to each other in the shared memory.
pointer_ += (2 * whole_tiles * Shape::kContiguous / kElementsPerAccess);
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_DEVICE
ScaleBiasTileIterator &operator++() {
byte_offset_ += Policy::LdsmShape::kContiguous *
sizeof_bits<Element>::value * kElementsPerAccess / 8;
k_group_idx_++;
if (k_group_idx_ == (Policy::kGroupsPerTile / kPartitionsK)) {
k_group_idx_ = 0;
byte_offset_ -= (Policy::kGroupsPerTile / kPartitionsK) *
Policy::LdsmShape::kContiguous *
sizeof_bits<Element>::value * kElementsPerAccess / 8;
add_tile_offset({Policy::kGroupsPerTile, 0});
}
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
  ScaleBiasTileIterator &operator--() { assert(0); return *this; }
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
ScaleBiasTileIterator &operator+=(
TensorCoord const &tile_offset) {
add_tile_offset(tile_offset);
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
ScaleBiasTileIterator &operator-=(
TensorCoord const &tile_offset) {
add_tile_offset(-tile_offset);
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const { load_with_byte_offset(frag, 0); }
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset in units of bytes
Index byte_offset) const {
Array<unsigned, 4> *fetch_ptr =
reinterpret_cast<Array<unsigned, 4> *>(&frag);
CUTLASS_PRAGMA_UNROLL
for (int s = 0; s < 1; ++s) {
CUTLASS_PRAGMA_UNROLL
for (int c = 0; c < Policy::LdsmIterations::kContiguous; ++c) {
int access_idx = c + s * Policy::LdsmIterations::kContiguous;
AccessType const *source_ptr =
pointer_ + Policy::LdsmShape::kContiguous * c;
char const *source_byte_ptr =
reinterpret_cast<char const *>(source_ptr) + byte_offset +
byte_offset_;
cutlass::arch::ldsm<layout::RowMajor, 4>(
fetch_ptr[access_idx], source_byte_ptr);
}
}
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
load_with_byte_offset(frag, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
load_with_byte_offset(frag, tile_offset, 0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
load_with_byte_offset(frag, tile_offset, pointer_offset * sizeof(Element));
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
Index pointer_offset = tile_offset.contiguous() *
InstructionShape::kContiguous /
kElementsPerAccess;
byte_offset += sizeof_bits<AccessType>::value * pointer_offset / 8;
load_with_byte_offset(frag, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
k_group_idx_ = k_group % (Policy::kGroupsPerTile / kPartitionsK);
}
};
////////////////////////////////////////////////////////////////////////////////
/// This tile iterator is specialized for 32-thread TensorOps. It uses LDSM to
/// load from shared memory and therefore must be initialized with a TensorRef
/// to shared memory.
///
/// Satisfies:
/// ReadableRandomAccessContiguousTileIteratorConcept
///
template <
/// Size of the matrix to load (concept: MatrixShape)
typename Shape_,
/// Data type of elements
typename Element_,
/// Shape of one matrix product operation (concept: MatrixShape)
typename InstructionShape_,
/// Policy of the details of LDSM shape and iterations
typename Policy_,
/// Number of partitions along K dimension
int PartitionsK_>
class ScaleBiasTileIterator<Shape_, Element_, cutlass::layout::RowMajor,
InstructionShape_, Policy_, 32, PartitionsK_> {
public:
/// Shape of tile to load (concept: PitchLinearShape)
using Shape = Shape_;
/// Element type
using Element = Element_;
/// Layout of source tile
using Layout = cutlass::layout::RowMajor;
/// Shape of one matrix product operation (concept: MatrixShape)
using InstructionShape = InstructionShape_;
/// Number of participating threads
static int const kThreads = 32;
/// TensorRef type for loading element from a tensor
using TensorRef = TensorRef<Element, Layout>;
/// Index type
using Index = typename TensorRef::Index;
/// Long Index type
using LongIndex = typename TensorRef::LongIndex;
/// Coordinate for an element in the tensor
using TensorCoord = typename TensorRef::TensorCoord;
/// Internal structure of iterator - made public to enable introspection
using Policy = Policy_;
/// Underlying tile iterator implementation
using Base = ScaleBiasTileIterator<
layout::PitchLinearShape<Shape::kColumn, Shape::kRow>, Element,
layout::PitchLinear,
layout::PitchLinearShape<InstructionShape::kColumn,
InstructionShape::kRow>,
Policy, kThreads, PartitionsK_>;
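  // The row-major iterator is a thin adapter over the pitch-linear
  // implementation above: row/column coordinates are swapped so that the
  // column maps to the contiguous dimension and the row to the strided one.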
public:
//
// Derived quantities
//
/// Fragment object holding a thread's part of a tile
using Fragment = typename Base::Fragment;
private:
/// Underlying tile iterator
Base iterator_;
public:
/// Default ctor constructs null iterator
CUTLASS_HOST_DEVICE
ScaleBiasTileIterator() {}
/// Constructor from TensorRef
CUTLASS_HOST_DEVICE
ScaleBiasTileIterator(TensorRef const &ref_scale_bias, int lane_id)
: iterator_({ref_scale_bias.data(), ref_scale_bias.stride()}, lane_id) {}
/// Adds a pointer offset to internal pointer(s) to advance through memory
CUTLASS_HOST_DEVICE
ScaleBiasTileIterator &add_pointer_offset(LongIndex offset) {
iterator_.add_pointer_offset(offset);
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_HOST_DEVICE
ScaleBiasTileIterator &add_tile_offset(
TensorCoord const &tile_offset) {
iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances an iterator along logical dimensions of matrix in units of whole
/// tiles
CUTLASS_DEVICE
ScaleBiasTileIterator &add_tile_offset_negative(
TensorCoord const &tile_offset) {
iterator_.add_tile_offset_negative({tile_offset.column(), tile_offset.row()});
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
ScaleBiasTileIterator &operator++() {
++iterator_;
return *this;
}
/// Advances the iterator along the advance dimension
CUTLASS_HOST_DEVICE
ScaleBiasTileIterator &operator--() {
--iterator_;
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
ScaleBiasTileIterator &operator+=(
TensorCoord const &tile_offset) {
add_tile_offset(PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
///< advances in units of whole tiles along the logical coordinate space of
///< the tensor
CUTLASS_DEVICE
ScaleBiasTileIterator &operator-=(
TensorCoord const &tile_offset) {
add_tile_offset(-PitchLinearCoord(tile_offset.column(), tile_offset.row()));
return *this;
}
/// Loads a fragment from memory at the location pointed to by the iterator.
CUTLASS_HOST_DEVICE
void load(Fragment &frag) const { iterator_.load(frag); }
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_pointer_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index pointer_offset) const {
iterator_.load_with_pointer_offset(frag, pointer_offset);
}
/// Loads a fragment from memory with additional logical offset
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a linear offset
Index byte_offset) const {
iterator_.load_with_byte_offset(frag, byte_offset);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset) const {
// TODO
assert(0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index pointer_offset) const {
// TODO
assert(0);
}
/// Loads a fragment from memory with logical offset in units of whole tiles.
CUTLASS_DEVICE
void load_with_byte_offset(
/// fragment to load from the tensor
Fragment &frag,
/// loads a tile with a logical offset in units of whole tiles
TensorCoord const &tile_offset,
/// loads a tile with a logical offset AND a pointer offset
Index byte_offset) const {
iterator_.load_with_byte_offset(
frag, {tile_offset.strided(), tile_offset.contiguous()}, byte_offset);
}
/// Notify the iterator which k-group it is currently pointing to.
///
/// This does not advance the iterator. Rather, it overrides its internal
/// tracking with constant-valued k-group index to enable the compiler to
/// fold constants and achieve more efficient code.
///
/// This is used by some nontrivial permuted layouts.
CUTLASS_DEVICE
void set_kgroup_index(int k_group) {
iterator_.set_kgroup_index(k_group);
}
};
////////////////////////////////////////////////////////////////////////////////
} // namespace warp
} // namespace gemm
} // namespace cutlass
////////////////////////////////////////////////////////////////////////////////
| 19,125 | C | 32.262609 | 100 | 0.662745 |