hip_filename | hip_content | cuda_filename | cuda_content |
---|---|---|---|
ac96413d2583cbf119e60a42d062290ef1cfb208.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define BLOCK_SIZE 512
#define SECTION_SIZE 1024 // define section size (size of subarray to be handled) to be twice the block size
__global__ void work_efficient_inclusive_scan(float *X, float *Y, unsigned in_size) {
__shared__ float XY[SECTION_SIZE];
// Load elements from the input into the shared-memory array
unsigned int t = threadIdx.x;
unsigned int start = 2 * blockIdx.x * blockDim.x;
// Each thread loads 2 elements, since section size is double block size
if(t + start < in_size) {
if(t == 0 && blockIdx.x == 0) {
XY[t] = 0;
} else {
XY[t] = X[start + t - 1];
}
}
if(t + start + BLOCK_SIZE < in_size) {
XY[t + BLOCK_SIZE] = X[start + t + BLOCK_SIZE - 1];
}
// Reduction tree step (increase stride size)
for(unsigned int stride = 1; stride <= BLOCK_SIZE; stride *=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i < SECTION_SIZE) {
XY[i] += XY[i-stride];
}
}
// Distribution tree step (decrease stride size)
for(unsigned int stride = SECTION_SIZE/4; stride>0; stride/=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i + stride < SECTION_SIZE) {
XY[i + stride] += XY[i];
}
}
__syncthreads();
// Copy results to the output array
t = threadIdx.x;
if(t + start < in_size) {
Y[start + t] = XY[t];
}
if(t + start + BLOCK_SIZE < in_size) {
Y[start + t + BLOCK_SIZE] = XY[t + BLOCK_SIZE];
}
}
/*
* Note: this kernel assumes the previous stage produced at most 1024 per-section sums
* (SECTION_SIZE, i.e. exactly twice BLOCK_SIZE), so a single thread block in this
* prefix-scan stage can handle all the block outputs from the previous prefix-scan stage.
*/
__global__ void work_efficient_inclusive_scan_2(float *X, float *Y, unsigned in_size) {
__shared__ float XY[SECTION_SIZE];
unsigned int t = threadIdx.x;
// Each thread loads 2 elements, each element being the last element of every SECTION from last kernel
if(SECTION_SIZE * (t+1) - 1 < in_size) {
XY[t] = X[SECTION_SIZE * (t+1) - 1];
}
if(SECTION_SIZE * (t+BLOCK_SIZE+1) - 1 < in_size) {
XY[t+BLOCK_SIZE] = X[SECTION_SIZE * (t+BLOCK_SIZE+1) - 1];
}
// Reduction tree step (increase stride size)
for(unsigned int stride = 1; stride <= BLOCK_SIZE; stride *=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i < SECTION_SIZE) {
XY[i] += XY[i-stride];
}
}
// Distribution tree step (decrease stride size)
for(unsigned int stride = SECTION_SIZE/4; stride>0; stride/=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i + stride < SECTION_SIZE) {
XY[i + stride] += XY[i];
}
}
__syncthreads();
// Copy results to the output array
t = threadIdx.x;
if(t < in_size) {
Y[t] = XY[t];
}
if(t+BLOCK_SIZE < in_size) {
Y[t+BLOCK_SIZE] = XY[t+BLOCK_SIZE];
}
}
__global__ void work_efficient_inclusive_scan_3(float *X2, float *X, float *Y, unsigned in_size) {
unsigned int t = threadIdx.x;
// Copy to the output array (each thread copies 2 elements and adds the result from the previous kernel)
unsigned int start = 2 * blockIdx.x * blockDim.x;
if(start != 0) { // Do for blocks 1 onwards
if(start + t < in_size) {
Y[start + t] = X2[start + t] + X[blockIdx.x - 1];
}
if(start + t + BLOCK_SIZE < in_size) {
Y[start + t + BLOCK_SIZE] = X2[start + t + BLOCK_SIZE] + X[blockIdx.x - 1];
}
} else {
if(start + t < in_size) {
Y[start + t] = X2[start + t];
}
if(start + t + BLOCK_SIZE < in_size) {
Y[start + t + BLOCK_SIZE] = X2[start + t + BLOCK_SIZE];
}
}
}
void preScan(float *out2, float *in, unsigned in_size) {
// INSERT CODE HERE
dim3 DimGrid((in_size-1)/(BLOCK_SIZE*2)+1, 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
hipLaunchKernelGGL(( work_efficient_inclusive_scan), dim3(DimGrid), dim3(DimBlock), 0, 0, in, out2, in_size);
}
void preScan2(float *out3, float *out2, unsigned in_size) {
// INSERT CODE HERE
dim3 DimGrid(1, 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
hipLaunchKernelGGL(( work_efficient_inclusive_scan_2), dim3(DimGrid), dim3(DimBlock), 0, 0, out2, out3, in_size);
}
void preScan3(float *out, float *out3, float *out2, unsigned in_size) {
// INSERT CODE HERE
dim3 DimGrid((in_size-1)/(BLOCK_SIZE*2)+1, 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
hipLaunchKernelGGL(( work_efficient_inclusive_scan_3), dim3(DimGrid), dim3(DimBlock), 0, 0, out2, out3, out, in_size);
}
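/*
 * Usage sketch (assumption: d_in, d_partial, d_block_sums and d_out are device buffers the
 * caller has already allocated with hipMalloc, with the input copied into d_in; these names
 * are illustrative and not part of this file):
 *
 *   preScan (d_partial, d_in, in_size);                // stage 1: per-section scan, (in_size-1)/1024+1 blocks
 *   preScan2(d_block_sums, d_partial, in_size);        // stage 2: one block scans the per-section totals
 *   preScan3(d_out, d_block_sums, d_partial, in_size); // stage 3: add each section's offset to its elements
 *
 * Note that each wrapper takes its output buffer first, matching the definitions above.
 */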
| ac96413d2583cbf119e60a42d062290ef1cfb208.cu | #define BLOCK_SIZE 512
#define SECTION_SIZE 1024 // define section size (size of subarray to be handled) to be twice the block size
__global__ void work_efficient_inclusive_scan(float *X, float *Y, unsigned in_size) {
__shared__ float XY[SECTION_SIZE];
// Load elements from the input into the shared-memory array
unsigned int t = threadIdx.x;
unsigned int start = 2 * blockIdx.x * blockDim.x;
// Each thread loads 2 elements, since section size is double block size
if(t + start < in_size) {
if(t == 0 && blockIdx.x == 0) {
XY[t] = 0;
} else {
XY[t] = X[start + t - 1];
}
}
if(t + start + BLOCK_SIZE < in_size) {
XY[t + BLOCK_SIZE] = X[start + t + BLOCK_SIZE - 1];
}
// Reduction tree step (increase stride size)
for(unsigned int stride = 1; stride <= BLOCK_SIZE; stride *=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i < SECTION_SIZE) {
XY[i] += XY[i-stride];
}
}
// Distribution tree step (decrease stride size)
for(unsigned int stride = SECTION_SIZE/4; stride>0; stride/=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i + stride < SECTION_SIZE) {
XY[i + stride] += XY[i];
}
}
__syncthreads();
// Copy results to the output array
t = threadIdx.x;
if(t + start < in_size) {
Y[start + t] = XY[t];
}
if(t + start + BLOCK_SIZE < in_size) {
Y[start + t + BLOCK_SIZE] = XY[t + BLOCK_SIZE];
}
}
/*
* Note: this kernel assumes the previous stage produced at most 1024 per-section sums
* (SECTION_SIZE, i.e. exactly twice BLOCK_SIZE), so a single thread block in this
* prefix-scan stage can handle all the block outputs from the previous prefix-scan stage.
*/
__global__ void work_efficient_inclusive_scan_2(float *X, float *Y, unsigned in_size) {
__shared__ float XY[SECTION_SIZE];
unsigned int t = threadIdx.x;
// Each thread loads 2 elements, each element being the last element of every SECTION from last kernel
if(SECTION_SIZE * (t+1) - 1 < in_size) {
XY[t] = X[SECTION_SIZE * (t+1) - 1];
}
if(SECTION_SIZE * (t+BLOCK_SIZE+1) - 1 < in_size) {
XY[t+BLOCK_SIZE] = X[SECTION_SIZE * (t+BLOCK_SIZE+1) - 1];
}
// Reduction tree step (increase stride size)
for(unsigned int stride = 1; stride <= BLOCK_SIZE; stride *=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i < SECTION_SIZE) {
XY[i] += XY[i-stride];
}
}
// Distribution tree step (decrease stride size)
for(unsigned int stride = SECTION_SIZE/4; stride>0; stride/=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i + stride < SECTION_SIZE) {
XY[i + stride] += XY[i];
}
}
__syncthreads();
// Copy results to the output array
t = threadIdx.x;
if(t < in_size) {
Y[t] = XY[t];
}
if(t+BLOCK_SIZE < in_size) {
Y[t+BLOCK_SIZE] = XY[t+BLOCK_SIZE];
}
}
__global__ void work_efficient_inclusive_scan_3(float *X2, float *X, float *Y, unsigned in_size) {
unsigned int t = threadIdx.x;
// Copy to the output array (each thread copies 2 elements and adds the result from the previous kernel)
unsigned int start = 2 * blockIdx.x * blockDim.x;
if(start != 0) { // Do for blocks 1 onwards
if(start + t < in_size) {
Y[start + t] = X2[start + t] + X[blockIdx.x - 1];
}
if(start + t + BLOCK_SIZE < in_size) {
Y[start + t + BLOCK_SIZE] = X2[start + t + BLOCK_SIZE] + X[blockIdx.x - 1];
}
} else {
if(start + t < in_size) {
Y[start + t] = X2[start + t];
}
if(start + t + BLOCK_SIZE < in_size) {
Y[start + t + BLOCK_SIZE] = X2[start + t + BLOCK_SIZE];
}
}
}
void preScan(float *out2, float *in, unsigned in_size) {
// INSERT CODE HERE
dim3 DimGrid((in_size-1)/(BLOCK_SIZE*2)+1, 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
work_efficient_inclusive_scan<<<DimGrid, DimBlock>>>(in, out2, in_size);
}
void preScan2(float *out3, float *out2, unsigned in_size) {
// INSERT CODE HERE
dim3 DimGrid(1, 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
work_efficient_inclusive_scan_2<<<DimGrid, DimBlock>>>(out2, out3, in_size);
}
void preScan3(float *out, float *out3, float *out2, unsigned in_size) {
// INSERT CODE HERE
dim3 DimGrid((in_size-1)/(BLOCK_SIZE*2)+1, 1, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
work_efficient_inclusive_scan_3<<<DimGrid, DimBlock>>>(out2, out3, out, in_size);
}
|
650cfff15df5bce3f37052765fc5a7d884906c4b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>
#include <time.h>
#include <sys/time.h>
#include <algorithm>
#define INF 1000000
double t1, t2;
int n, m; // Number of vertices, edges
int *Dist;
int *dev_dist;
double wallclock(void)
{ struct timeval tv;
struct timezone tz;
double t;
gettimeofday(&tv, &tz);
t = (double)tv.tv_sec*1000;
t += ((double)tv.tv_usec)/1000.0;
return t;
}// millisecond
void input(char *inFileName){
FILE *infile = fopen(inFileName, "r");
fscanf(infile, "%d %d", &n, &m);
//Dist = (int*)malloc(sizeof(int)*n*n);
hipHostMalloc((void**) &Dist, sizeof(int)*n*n); //Pinned Memory
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
(i==j)?Dist[i*n+j]=0:Dist[i*n+j]=INF;
int a, b, v;
while (m--) {
fscanf(infile, "%d %d %d", &a, &b, &v);
Dist[(a-1)*n+(b-1)] = v;
}
}
void output(char *outFileName){
FILE *outfile = fopen(outFileName, "w");
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
if (Dist[i*n+j] >= INF) fprintf(outfile, "INF ");
else fprintf(outfile, "%d ", Dist[i*n+j]);
}
fprintf(outfile, "\n");
}
}
int ceil(int a, int b){
return (a + b -1)/b;
}
__global__ void cal(int n, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height, int* dev_dist){
int block_end_x = block_start_x + block_height;
int block_end_y = block_start_y + block_width;
for (int b_i = block_start_x+blockIdx.x; b_i < block_end_x; b_i+=gridDim.x) {
for (int b_j = block_start_y+blockIdx.y; b_j < block_end_y; b_j+=gridDim.y) {
// To calculate B*B elements in the block (b_i, b_j)
// For each block, it needs to compute B times
for (int k = Round * B; k < (Round +1) * B && k < n; k++) {
// To calculate original index of elements in the block (b_i, b_j)
// For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2D
int block_internal_start_x = b_i * B;
int block_internal_end_x = (b_i +1) * B;
int block_internal_start_y = b_j * B;
int block_internal_end_y = (b_j +1) * B;
if (block_internal_end_x > n) block_internal_end_x = n;
if (block_internal_end_y > n) block_internal_end_y = n;
for (int i = block_internal_start_x+threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
for (int j = block_internal_start_y+threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
if (dev_dist[i*n+k] + dev_dist[k*n+j] < dev_dist[i*n+j])
dev_dist[i*n+j] = dev_dist[i*n+k] + dev_dist[k*n+j];
}
}
__syncthreads();
}
}
}
}
int init_device(void){
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
printf("No CUDA device found.\n\n");
return 0;
} else {
hipSetDevice(0);
return 0;
}
}
void block_APSP(int B){
init_device();
int round = ceil(n, B);
hipMalloc((void**) &dev_dist, sizeof(int)*n*n);
hipMemcpy(dev_dist, Dist, sizeof(int)*n*n, hipMemcpyHostToDevice);
dim3 block(round, round), thread(min(B,32), min(B,32));
//dim3 block(10, 10), thread(10, 10);
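/* Each round r of this blocked Floyd-Warshall runs three dependent phases:
 * Phase 1 relaxes the pivot block (r, r) using only its own entries;
 * Phase 2 relaxes the remaining blocks of pivot row r and pivot column r, which depend on the pivot block;
 * Phase 3 relaxes all other blocks, which depend on the freshly updated pivot row and column.
 * The kernel arguments below select those ranges via (block_start_x, block_start_y, block_width, block_height). */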
for (int r = 0; r < round; r++) {
/* Phase 1*/
hipLaunchKernelGGL(( cal), dim3(block), dim3(thread), 0, 0, n, B, r, r, r, 1, 1, dev_dist);
/* Phase 2*/
hipLaunchKernelGGL(( cal), dim3(block), dim3(thread), 0, 0, n, B, r, r, 0, r, 1, dev_dist);
hipLaunchKernelGGL(( cal), dim3(block), dim3(thread), 0, 0, n, B, r, r, r+1, round-r-1, 1, dev_dist);
hipLaunchKernelGGL(( cal), dim3(block), dim3(thread), 0, 0, n, B, r, 0, r, 1, r, dev_dist);
hipLaunchKernelGGL(( cal), dim3(block), dim3(thread), 0, 0, n, B, r, r+1, r, 1, round-r-1, dev_dist);
/* Phase 3*/
hipLaunchKernelGGL(( cal), dim3(block), dim3(thread), 0, 0, n, B, r, 0, 0, r, r, dev_dist);
hipLaunchKernelGGL(( cal), dim3(block), dim3(thread), 0, 0, n, B, r, 0, r+1, round-r-1, r, dev_dist);
hipLaunchKernelGGL(( cal), dim3(block), dim3(thread), 0, 0, n, B, r, r+1, 0, r, round-r-1, dev_dist);
hipLaunchKernelGGL(( cal), dim3(block), dim3(thread), 0, 0, n, B, r, r+1, r+1, round-r-1, round-r-1, dev_dist);
}
hipMemcpy(Dist, dev_dist, sizeof(int)*n*n, hipMemcpyDeviceToHost);
}
int main(int argc, char* argv[]){
input(argv[1]);
int B = 64;
t1 = wallclock();
block_APSP(B);
t2 = wallclock();
//printf("total time %10.3lf\n", t2-t1);
output(argv[2]);
hipFree(dev_dist);
return 0;
}
| 650cfff15df5bce3f37052765fc5a7d884906c4b.cu | #include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>
#include <time.h>
#include <sys/time.h>
#include <algorithm>
#define INF 1000000
double t1, t2;
int n, m; // Number of vertices, edges
int *Dist;
int *dev_dist;
double wallclock(void)
{ struct timeval tv;
struct timezone tz;
double t;
gettimeofday(&tv, &tz);
t = (double)tv.tv_sec*1000;
t += ((double)tv.tv_usec)/1000.0;
return t;
}// millisecond
void input(char *inFileName){
FILE *infile = fopen(inFileName, "r");
fscanf(infile, "%d %d", &n, &m);
//Dist = (int*)malloc(sizeof(int)*n*n);
cudaMallocHost((void**) &Dist, sizeof(int)*n*n); //Pinned Memory
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
(i==j)?Dist[i*n+j]=0:Dist[i*n+j]=INF;
int a, b, v;
while (m--) {
fscanf(infile, "%d %d %d", &a, &b, &v);
Dist[(a-1)*n+(b-1)] = v;
}
}
void output(char *outFileName){
FILE *outfile = fopen(outFileName, "w");
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
if (Dist[i*n+j] >= INF) fprintf(outfile, "INF ");
else fprintf(outfile, "%d ", Dist[i*n+j]);
}
fprintf(outfile, "\n");
}
}
int ceil(int a, int b){
return (a + b -1)/b;
}
__global__ void cal(int n, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height, int* dev_dist){
int block_end_x = block_start_x + block_height;
int block_end_y = block_start_y + block_width;
for (int b_i = block_start_x+blockIdx.x; b_i < block_end_x; b_i+=gridDim.x) {
for (int b_j = block_start_y+blockIdx.y; b_j < block_end_y; b_j+=gridDim.y) {
// To calculate B*B elements in the block (b_i, b_j)
// For each block, it needs to compute B times
for (int k = Round * B; k < (Round +1) * B && k < n; k++) {
// To calculate original index of elements in the block (b_i, b_j)
// For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2D
int block_internal_start_x = b_i * B;
int block_internal_end_x = (b_i +1) * B;
int block_internal_start_y = b_j * B;
int block_internal_end_y = (b_j +1) * B;
if (block_internal_end_x > n) block_internal_end_x = n;
if (block_internal_end_y > n) block_internal_end_y = n;
for (int i = block_internal_start_x+threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
for (int j = block_internal_start_y+threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
if (dev_dist[i*n+k] + dev_dist[k*n+j] < dev_dist[i*n+j])
dev_dist[i*n+j] = dev_dist[i*n+k] + dev_dist[k*n+j];
}
}
__syncthreads();
}
}
}
}
int init_device(void){
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
printf("No CUDA device found.\n\n");
return 0;
} else {
cudaSetDevice(0);
return 0;
}
}
void block_APSP(int B){
init_device();
int round = ceil(n, B);
cudaMalloc((void**) &dev_dist, sizeof(int)*n*n);
cudaMemcpy(dev_dist, Dist, sizeof(int)*n*n, cudaMemcpyHostToDevice);
dim3 block(round, round), thread(min(B,32), min(B,32));
//dim3 block(10, 10), thread(10, 10);
for (int r = 0; r < round; r++) {
/* Phase 1*/
cal<<<block, thread>>>(n, B, r, r, r, 1, 1, dev_dist);
/* Phase 2*/
cal<<<block, thread>>>(n, B, r, r, 0, r, 1, dev_dist);
cal<<<block, thread>>>(n, B, r, r, r+1, round-r-1, 1, dev_dist);
cal<<<block, thread>>>(n, B, r, 0, r, 1, r, dev_dist);
cal<<<block, thread>>>(n, B, r, r+1, r, 1, round-r-1, dev_dist);
/* Phase 3*/
cal<<<block, thread>>>(n, B, r, 0, 0, r, r, dev_dist);
cal<<<block, thread>>>(n, B, r, 0, r+1, round-r-1, r, dev_dist);
cal<<<block, thread>>>(n, B, r, r+1, 0, r, round-r-1, dev_dist);
cal<<<block, thread>>>(n, B, r, r+1, r+1, round-r-1, round-r-1, dev_dist);
}
cudaMemcpy(Dist, dev_dist, sizeof(int)*n*n, cudaMemcpyDeviceToHost);
}
int main(int argc, char* argv[]){
input(argv[1]);
int B = 64;
t1 = wallclock();
block_APSP(B);
t2 = wallclock();
//printf("total time %10.3lf\n", t2-t1);
output(argv[2]);
cudaFree(dev_dist);
return 0;
}
|
efe45181cb752164f7b428aa62b286db9051894f.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* template_problem.cuh
*
* @brief GPU Storage management Structure for Template Problem Data
*/
#pragma once
#include <gunrock/app/problem_base.cuh>
namespace gunrock {
namespace app {
namespace SSSP_Test {
/**
* @brief Specifying parameters for SSSP Problem
* @param parameters The util::Parameter<...> structure holding all parameter info
* \return hipError_t error message(s), if any
*/
hipError_t UseParameters_problem(
util::Parameters ¶meters)
{
hipError_t retval = hipSuccess;
GUARD_CU(gunrock::app::UseParameters_problem(parameters));
GUARD_CU(parameters.Use<bool>(
"mark-pred",
util::OPTIONAL_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
false,
"Whether to mark predecessor info.",
__FILE__, __LINE__));
return retval;
}
/**
* @brief Template Problem structure.
* @tparam _GraphT Type of the graph
* @tparam _FLAG Problem flags
*/
template <
typename _GraphT,
typename _ValueT = typename _GraphT::ValueT,
typename _LabelT = typename _GraphT::LabelT,
ProblemFlag _FLAG = Problem_None>
struct Problem : ProblemBase<_GraphT, _FLAG>
{
typedef _GraphT GraphT;
static const ProblemFlag FLAG = _FLAG;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::SizeT SizeT;
typedef typename GraphT::CsrT CsrT;
typedef typename GraphT::GpT GpT;
typedef _ValueT ValueT;
typedef _LabelT LabelT;
typedef ProblemBase <GraphT, FLAG> BaseProblem;
typedef DataSliceBase <GraphT, FLAG> BaseDataSlice;
//Helper structures
/**
* @brief Data structure containing SSSP-specific data on an individual GPU.
*/
struct DataSlice : BaseDataSlice
{
util::Array1D<SizeT, ValueT> distances ; // distances from source
util::Array1D<SizeT, LabelT> labels ; // labels to mark latest iteration
util::Array1D<SizeT, VertexT> preds ; // predecessors of vertices
util::Array1D<SizeT, VertexT> temp_preds; // predecessors of vertices
/*
* @brief Default constructor
*/
DataSlice() : BaseDataSlice()
{
distances .SetName("distances" );
labels .SetName("labels" );
preds .SetName("preds" );
temp_preds .SetName("temp_preds" );
}
/*
* @brief Default destructor
*/
virtual ~DataSlice()
{
Release();
}
/*
* @brief Releasing allocated memory space
* @param[in] target The location to release memory from
* \return hipError_t Error message(s), if any
*/
hipError_t Release(util::Location target = util::LOCATION_ALL)
{
hipError_t retval = hipSuccess;
if (target & util::DEVICE)
GUARD_CU(util::SetDevice(this->gpu_idx));
GUARD_CU(distances .Release(target));
GUARD_CU(labels .Release(target));
GUARD_CU(preds .Release(target));
GUARD_CU(temp_preds .Release(target));
GUARD_CU(BaseDataSlice ::Release(target));
return retval;
}
/**
* @brief initializing sssp-specific data on each gpu
* @param sub_graph Sub graph on the GPU.
* @param[in] gpu_idx GPU device index
* @param[in] target Targeting device location
* @param[in] flag Problem flag containing options
* \return hipError_t Error message(s), if any
*/
hipError_t Init(
GraphT &sub_graph,
int gpu_idx = 0,
util::Location target = util::DEVICE,
ProblemFlag flag = Problem_None)
{
hipError_t retval = hipSuccess;
GUARD_CU(BaseDataSlice::Init(sub_graph, num_gpus, gpu_idx, target, flag));
GUARD_CU(distances .Allocate(sub_graph.nodes, target));
GUARD_CU(labels .Allocate(sub_graph.nodes, target));
if (this->flag & Mark_Predecessors){
GUARD_CU(temp_preds.Allocate(sub_graph.nodes, target));
GUARD_CU(preds .Allocate(sub_graph.nodes, target));
}
if (target & util::DEVICE)
{
GUARD_CU(sub_graph.CsrT::Move(util::HOST, target, this -> stream));
}
return retval;
} // Init
/**
* @brief Reset problem function. Must be called prior to each run.
* @param[in] target Targeting device location
* \return hipError_t Error message(s), if any
*/
hipError_t Reset(util::Location target = util::DEVICE)
{
hipError_t retval = hipSuccess;
SizeT nodes = this -> sub_graph -> nodes;
// Ensure data are allocated
GUARD_CU(distances .EnsureSize_(nodes, target));
GUARD_CU(labels .EnsureSize_(nodes, target));
if (this->flag & Mark_Predecessors){
GUARD_CU(preds .EnsureSize_(nodes, target));
GUARD_CU(temp_preds .EnsureSize_(nodes, target));
}
// Reset data
GUARD_CU(labels.ForEach([]__host__ __device__
(LabelT &label){
label = util::PreDefinedValues<LabelT>::InvalidValue;
}, nodes, target, this->stream));
GUARD_CU(distances.ForEach([]__host__ __device__
(ValueT &distance){
distance = util::PreDefinedValues<ValueT>::MaxValue;
}, nodes, target, this -> stream));
if (this->flag & Mark_Predecessors){
GUARD_CU(preds.ForAll([]__host__ __device__
(VertexT *preds_, const SizeT &pos){
preds_[pos] = pos;
}, nodes, target, this->stream));
GUARD_CU(temp_preds.ForAll([]__host__ __device__
(VertexT *temp_preds_, const SizeT &pos){
temp_preds_[pos] = pos;
}, nodes, target, this->stream));
}
return retval;
}
}; // DataSlice
// Members
// Set of data slices (one for each GPU)
util::Array1D<SizeT, DataSlice> *data_slices;
// Methods
/**
* @brief SSSPProblem default constructor
*/
Problem(
util::Parameters &_parameters,
ProblemFlag _flag = Problem_None) :
BaseProblem(_parameters, _flag),
data_slices(NULL)
{
}
/**
* @brief SSSPProblem default destructor
*/
virtual ~Problem()
{
Release();
}
/*
* @brief Releasing allocated memory space
* @param[in] target The location to release memory from
* \return hipError_t Error message(s), if any
*/
hipError_t Release(util::Location target = util::LOCATION_ALL)
{
hipError_t retval = hipSuccess;
if (data_slices == NULL) return retval;
for (int i = 0; i < this->num_gpus; i++)
GUARD_CU(data_slices[i].Release(target));
if ((target & util::HOST) != 0 &&
data_slices[0].GetPointer(util::DEVICE) == NULL)
{
delete[] data_slices; data_slices=NULL;
}
GUARD_CU(BaseProblem::Release(target));
return retval;
}
/**
* \addtogroup PublicInterface
* @{
*/
/**
* @brief Copy result distances computed on GPUs back to host-side arrays.
* @param[out] h_distances Host array to store computed vertex distances from the source.
* \return hipError_t Error message(s), if any
*/
hipError_t Extract(
ValueT *h_distances,
VertexT *h_preds = NULL,
util::Location target = util::DEVICE)
{
hipError_t retval = hipSuccess;
SizeT nodes = this -> org_graph -> nodes;
if (this-> num_gpus == 1)
{
auto &data_slice = data_slices[0][0];
// Set device
if (target & util::DEVICE)
{
GUARD_CU(util::SetDevice(this->gpu_idx[0]));
GUARD_CU(data_slice.distances.SetPointer(
h_distances, nodes, util::HOST));
GUARD_CU(data_slice.distances.Move(util::DEVICE, util::HOST));
if (!(this->flag & Mark_Predecessors)){
return retval;
}
GUARD_CU(data_slice.preds.SetPointer(h_preds, nodes, util::HOST));
GUARD_CU(data_slice.preds.Move(util::DEVICE, util::HOST));
}
}
else
{ // num_gpus != 1
util::Array1D<SizeT, ValueT *> th_distances;
util::Array1D<SizeT, VertexT *> th_preds;
th_distances.SetName("SSSP_Test::Problem::Extract::th_distances");
th_preds.SetName("SSSP_Test::Problem::Extract::th_preds");
GUARD_CU(th_distances.Allocate(this->num_gpus, util::HOST));
GUARD_CU(th_preds.Allocate(this->num_gpus, util::HOST));
for (int gpu = 0; gpu < this->num_gpus; gpu++)
{
auto &data_slice = data_slices[gpu][0];
if (target == util::DEVICE)
{
GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU(data_slice.distances.Move(util::DEVICE, util::HOST));
if (this->flag & Mark_Predecessors){
GUARD_CU(data_slice.preds.Move(util::DEVICE, util::HOST));
}
}
th_distances[gpu] = data_slice.distances.GetPointer(util::HOST);
th_preds[gpu] = data_slice.preds.GetPointer(util::HOST);
} //end for(gpu)
for (VertexT v = 0; v < nodes; v++)
{
int gpu = this -> org_graph -> GpT::partition_table[v];
VertexT v_ = v;
if ((GraphT::FLAG & gunrock::partitioner::Keep_Node_Num) != 0)
v_ = this -> org_graph -> GpT::convertion_table[v];
h_distances[v] = th_distances[gpu][v_];
}
GUARD_CU(th_distances.Release());
} //end if
return retval;
}
/**
* @brief initialization function.
* @param graph The graph that SSSP processes on
* @param[in] Location Memory location to work on
* \return hipError_t Error message(s), if any
*/
hipError_t Init(
GraphT &graph,
util::Location target = util::DEVICE)
{
hipError_t retval = hipSuccess;
GUARD_CU(BaseProblem::Init(graph, target));
data_slices = new util::Array1D<SizeT, DataSlice>[this->num_gpus];
if (this -> parameters.template Get<bool>("mark-pred"))
this -> flag = this -> flag | Mark_Predecessors;
for (int gpu = 0; gpu < this->num_gpus; gpu++)
{
data_slices[gpu].SetName("data_slices[" + std::to_string(gpu) + "]");
if (target & util::DEVICE)
GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU(data_slices[gpu].Allocate(1, target | util::HOST));
auto &data_slice = data_slices[gpu][0];
GUARD_CU(data_slice.Init(this -> sub_graphs[gpu],
this -> gpu_idx[gpu], target, this -> flag));
} // end for (gpu)
return retval;
}
/**
* @brief Reset problem function. Must be called prior to each run.
* @param[in] src Source vertex to start.
* @param[in] location Memory location to work on
* \return hipError_t Error message(s), if any
*/
hipError_t Reset(
VertexT src,
util::Location target = util::DEVICE)
{
hipError_t retval = hipSuccess;
for (int gpu = 0; gpu < this->num_gpus; ++gpu)
{
// Set device
if (target & util::DEVICE)
GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU(data_slices[gpu] -> Reset(target));
GUARD_CU(data_slices[gpu].Move(util::HOST, target));
}
int gpu;
VertexT src_;
if (this->num_gpus <= 1)
{
gpu = 0; src_=src;
} else {
gpu = this -> org_graph -> partition_table[src];
if (this -> flag & partitioner::Keep_Node_Num)
src_ = src;
else
src_ = this -> org_graph -> GpT::convertion_table[src];
}
GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU2(hipDeviceSynchronize(),
"hipDeviceSynchronize failed");
ValueT src_distance = 0;
if (target & util::HOST)
{
data_slices[gpu] -> distances[src_] = src_distance;
}
if (target & util::DEVICE)
{
GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU2(hipDeviceSynchronize(),
"hipDeviceSynchronize failed");
}
GUARD_CU(data_slices[gpu]->distances.ForAll(
[src_] __host__ __device__ (ValueT *distances_, const SizeT &pos)
{
distances_[src_] = 0;
}, 1, target));
if (this -> flag & Mark_Predecessors){
GUARD_CU(data_slices[gpu]->preds.ForAll(
[src_] __host__ __device__ (VertexT *preds_, const SizeT &pos)
{
preds_[src_] = util::PreDefinedValues<VertexT>::InvalidValue;
}, 1, target));
}
if (target & util::DEVICE)
{
GUARD_CU2(hipDeviceSynchronize(),
"hipDeviceSynchronize failed");
}
return retval;
}
/** @} */
};
} //namespace SSSP_Test
} //namespace app
} //namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| efe45181cb752164f7b428aa62b286db9051894f.cu | // ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* template_problem.cuh
*
* @brief GPU Storage management Structure for Template Problem Data
*/
#pragma once
#include <gunrock/app/problem_base.cuh>
namespace gunrock {
namespace app {
namespace SSSP_Test {
/**
* @brief Specifying parameters for SSSP Problem
* @param parameters The util::Parameter<...> structure holding all parameter info
* \return cudaError_t error message(s), if any
*/
cudaError_t UseParameters_problem(
util::Parameters ¶meters)
{
cudaError_t retval = cudaSuccess;
GUARD_CU(gunrock::app::UseParameters_problem(parameters));
GUARD_CU(parameters.Use<bool>(
"mark-pred",
util::OPTIONAL_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
false,
"Whether to mark predecessor info.",
__FILE__, __LINE__));
return retval;
}
/**
* @brief Template Problem structure.
* @tparam _GraphT Type of the graph
* @tparam _FLAG Problem flags
*/
template <
typename _GraphT,
typename _ValueT = typename _GraphT::ValueT,
typename _LabelT = typename _GraphT::LabelT,
ProblemFlag _FLAG = Problem_None>
struct Problem : ProblemBase<_GraphT, _FLAG>
{
typedef _GraphT GraphT;
static const ProblemFlag FLAG = _FLAG;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::SizeT SizeT;
typedef typename GraphT::CsrT CsrT;
typedef typename GraphT::GpT GpT;
typedef _ValueT ValueT;
typedef _LabelT LabelT;
typedef ProblemBase <GraphT, FLAG> BaseProblem;
typedef DataSliceBase <GraphT, FLAG> BaseDataSlice;
//Helper structures
/**
* @brief Data structure containing SSSP-specific data on an individual GPU.
*/
struct DataSlice : BaseDataSlice
{
util::Array1D<SizeT, ValueT> distances ; // distances from source
util::Array1D<SizeT, LabelT> labels ; // labels to mark latest iteration
util::Array1D<SizeT, VertexT> preds ; // predecessors of vertices
util::Array1D<SizeT, VertexT> temp_preds; // predecessors of vertices
/*
* @brief Default constructor
*/
DataSlice() : BaseDataSlice()
{
distances .SetName("distances" );
labels .SetName("labels" );
preds .SetName("preds" );
temp_preds .SetName("temp_preds" );
}
/*
* @brief Default destructor
*/
virtual ~DataSlice()
{
Release();
}
/*
* @brief Releasing allocated memory space
* @param[in] target The location to release memory from
* \return cudaError_t Error message(s), if any
*/
cudaError_t Release(util::Location target = util::LOCATION_ALL)
{
cudaError_t retval = cudaSuccess;
if (target & util::DEVICE)
GUARD_CU(util::SetDevice(this->gpu_idx));
GUARD_CU(distances .Release(target));
GUARD_CU(labels .Release(target));
GUARD_CU(preds .Release(target));
GUARD_CU(temp_preds .Release(target));
GUARD_CU(BaseDataSlice ::Release(target));
return retval;
}
/**
* @brief initializing sssp-specific data on each gpu
* @param sub_graph Sub graph on the GPU.
* @param[in] gpu_idx GPU device index
* @param[in] target Targeting device location
* @param[in] flag Problem flag containing options
* \return cudaError_t Error message(s), if any
*/
cudaError_t Init(
GraphT &sub_graph,
int gpu_idx = 0,
util::Location target = util::DEVICE,
ProblemFlag flag = Problem_None)
{
cudaError_t retval = cudaSuccess;
GUARD_CU(BaseDataSlice::Init(sub_graph, num_gpus, gpu_idx, target, flag));
GUARD_CU(distances .Allocate(sub_graph.nodes, target));
GUARD_CU(labels .Allocate(sub_graph.nodes, target));
if (this->flag & Mark_Predecessors){
GUARD_CU(temp_preds.Allocate(sub_graph.nodes, target));
GUARD_CU(preds .Allocate(sub_graph.nodes, target));
}
if (target & util::DEVICE)
{
GUARD_CU(sub_graph.CsrT::Move(util::HOST, target, this -> stream));
}
return retval;
} // Init
/**
* @brief Reset problem function. Must be called prior to each run.
* @param[in] target Targeting device location
* \return cudaError_t Error message(s), if any
*/
cudaError_t Reset(util::Location target = util::DEVICE)
{
cudaError_t retval = cudaSuccess;
SizeT nodes = this -> sub_graph -> nodes;
// Ensure data are allocated
GUARD_CU(distances .EnsureSize_(nodes, target));
GUARD_CU(labels .EnsureSize_(nodes, target));
if (this->flag & Mark_Predecessors){
GUARD_CU(preds .EnsureSize_(nodes, target));
GUARD_CU(temp_preds .EnsureSize_(nodes, target));
}
// Reset data
GUARD_CU(labels.ForEach([]__host__ __device__
(LabelT &label){
label = util::PreDefinedValues<LabelT>::InvalidValue;
}, nodes, target, this->stream));
GUARD_CU(distances.ForEach([]__host__ __device__
(ValueT &distance){
distance = util::PreDefinedValues<ValueT>::MaxValue;
}, nodes, target, this -> stream));
if (this->flag & Mark_Predecessors){
GUARD_CU(preds.ForAll([]__host__ __device__
(VertexT *preds_, const SizeT &pos){
preds_[pos] = pos;
}, nodes, target, this->stream));
GUARD_CU(temp_preds.ForAll([]__host__ __device__
(VertexT *temp_preds_, const SizeT &pos){
temp_preds_[pos] = pos;
}, nodes, target, this->stream));
}
return retval;
}
}; // DataSlice
// Members
// Set of data slices (one for each GPU)
util::Array1D<SizeT, DataSlice> *data_slices;
// Methods
/**
* @brief SSSPProblem default constructor
*/
Problem(
util::Parameters &_parameters,
ProblemFlag _flag = Problem_None) :
BaseProblem(_parameters, _flag),
data_slices(NULL)
{
}
/**
* @brief SSSPProblem default destructor
*/
virtual ~Problem()
{
Release();
}
/*
* @brief Releasing allocated memory space
* @param[in] target The location to release memory from
* \return cudaError_t Error message(s), if any
*/
cudaError_t Release(util::Location target = util::LOCATION_ALL)
{
cudaError_t retval = cudaSuccess;
if (data_slices == NULL) return retval;
for (int i = 0; i < this->num_gpus; i++)
GUARD_CU(data_slices[i].Release(target));
if ((target & util::HOST) != 0 &&
data_slices[0].GetPointer(util::DEVICE) == NULL)
{
delete[] data_slices; data_slices=NULL;
}
GUARD_CU(BaseProblem::Release(target));
return retval;
}
/**
* \addtogroup PublicInterface
* @{
*/
/**
* @brief Copy result distances computed on GPUs back to host-side arrays.
* @param[out] h_distances Host array to store computed vertex distances from the source.
* \return cudaError_t Error message(s), if any
*/
cudaError_t Extract(
ValueT *h_distances,
VertexT *h_preds = NULL,
util::Location target = util::DEVICE)
{
cudaError_t retval = cudaSuccess;
SizeT nodes = this -> org_graph -> nodes;
if (this-> num_gpus == 1)
{
auto &data_slice = data_slices[0][0];
// Set device
if (target & util::DEVICE)
{
GUARD_CU(util::SetDevice(this->gpu_idx[0]));
GUARD_CU(data_slice.distances.SetPointer(
h_distances, nodes, util::HOST));
GUARD_CU(data_slice.distances.Move(util::DEVICE, util::HOST));
if (!(this->flag & Mark_Predecessors)){
return retval;
}
GUARD_CU(data_slice.preds.SetPointer(h_preds, nodes, util::HOST));
GUARD_CU(data_slice.preds.Move(util::DEVICE, util::HOST));
}
}
else
{ // num_gpus != 1
util::Array1D<SizeT, ValueT *> th_distances;
util::Array1D<SizeT, VertexT *> th_preds;
th_distances.SetName("SSSP_Test::Problem::Extract::th_distances");
th_preds.SetName("SSSP_Test::Problem::Extract::th_preds");
GUARD_CU(th_distances.Allocate(this->num_gpus, util::HOST));
GUARD_CU(th_preds.Allocate(this->num_gpus, util::HOST));
for (int gpu = 0; gpu < this->num_gpus; gpu++)
{
auto &data_slice = data_slices[gpu][0];
if (target == util::DEVICE)
{
GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU(data_slice.distances.Move(util::DEVICE, util::HOST));
if (this->flag & Mark_Predecessors){
GUARD_CU(data_slice.preds.Move(util::DEVICE, util::HOST));
}
}
th_distances[gpu] = data_slice.distances.GetPointer(util::HOST);
th_preds[gpu] = data_slice.preds.GetPointer(util::HOST);
} //end for(gpu)
for (VertexT v = 0; v < nodes; v++)
{
int gpu = this -> org_graph -> GpT::partition_table[v];
VertexT v_ = v;
if ((GraphT::FLAG & gunrock::partitioner::Keep_Node_Num) != 0)
v_ = this -> org_graph -> GpT::convertion_table[v];
h_distances[v] = th_distances[gpu][v_];
}
GUARD_CU(th_distances.Release());
} //end if
return retval;
}
/**
* @brief initialization function.
* @param graph The graph that SSSP processes on
* @param[in] Location Memory location to work on
* \return cudaError_t Error message(s), if any
*/
cudaError_t Init(
GraphT &graph,
util::Location target = util::DEVICE)
{
cudaError_t retval = cudaSuccess;
GUARD_CU(BaseProblem::Init(graph, target));
data_slices = new util::Array1D<SizeT, DataSlice>[this->num_gpus];
if (this -> parameters.template Get<bool>("mark-pred"))
this -> flag = this -> flag | Mark_Predecessors;
for (int gpu = 0; gpu < this->num_gpus; gpu++)
{
data_slices[gpu].SetName("data_slices[" + std::to_string(gpu) + "]");
if (target & util::DEVICE)
GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU(data_slices[gpu].Allocate(1, target | util::HOST));
auto &data_slice = data_slices[gpu][0];
GUARD_CU(data_slice.Init(this -> sub_graphs[gpu],
this -> gpu_idx[gpu], target, this -> flag));
} // end for (gpu)
return retval;
}
/**
* @brief Reset problem function. Must be called prior to each run.
* @param[in] src Source vertex to start.
* @param[in] location Memory location to work on
* \return cudaError_t Error message(s), if any
*/
cudaError_t Reset(
VertexT src,
util::Location target = util::DEVICE)
{
cudaError_t retval = cudaSuccess;
for (int gpu = 0; gpu < this->num_gpus; ++gpu)
{
// Set device
if (target & util::DEVICE)
GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU(data_slices[gpu] -> Reset(target));
GUARD_CU(data_slices[gpu].Move(util::HOST, target));
}
int gpu;
VertexT src_;
if (this->num_gpus <= 1)
{
gpu = 0; src_=src;
} else {
gpu = this -> org_graph -> partition_table[src];
if (this -> flag & partitioner::Keep_Node_Num)
src_ = src;
else
src_ = this -> org_graph -> GpT::convertion_table[src];
}
GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU2(cudaDeviceSynchronize(),
"cudaDeviceSynchronize failed");
ValueT src_distance = 0;
if (target & util::HOST)
{
data_slices[gpu] -> distances[src_] = src_distance;
}
if (target & util::DEVICE)
{
GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU2(cudaDeviceSynchronize(),
"cudaDeviceSynchronize failed");
}
GUARD_CU(data_slices[gpu]->distances.ForAll(
[src_] __host__ __device__ (ValueT *distances_, const SizeT &pos)
{
distances_[src_] = 0;
}, 1, target));
if (this -> flag & Mark_Predecessors){
GUARD_CU(data_slices[gpu]->preds.ForAll(
[src_] __host__ __device__ (VertexT *preds_, const SizeT &pos)
{
preds_[src_] = util::PreDefinedValues<VertexT>::InvalidValue;
}, 1, target));
}
if (target & util::DEVICE)
{
GUARD_CU2(cudaDeviceSynchronize(),
"cudaDeviceSynchronize failed");
}
return retval;
}
/** @} */
};
} //namespace SSSP_Test
} //namespace app
} //namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
a0d5ca9314feab74db100156ea7b78b251eb5fdf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define NUM 1048576
#define NUM_THREADS 512
#define NUM_BLOCKS 2048
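/*
 * Two-stage GPU merge sort: sort_blocks first produces NUM_BLOCKS sorted runs of
 * NUM_THREADS elements each (one run per thread block, staged in shared memory);
 * merge_blocks then merges pairs of adjacent runs, halving the run count per pass.
 * With the sizes above (NUM = 1048576, NUM_THREADS = 512, NUM_BLOCKS = 2048):
 * 2048 runs of 512 -> 1024 runs of 1024 -> ... -> 1 run of 1048576 after 11 merge passes.
 */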
/* Function to sort the elements of each thread block using merge sort */
__global__ void sort_blocks(int *a)
{
int i=2;
__shared__ int temp [NUM_THREADS];
while (i <= NUM_THREADS)
{
if ((threadIdx.x % i)==0)
{
int begin1 = threadIdx.x + (blockIdx.x * blockDim.x);
int end1 = begin1 + i/2;
int begin2 = end1;
int end2 = begin2 + i/2;
int target = threadIdx.x;
do
{
if ((begin1 == end1) && (begin2 < end2))
temp[target++] = a[begin2++];
else if ((begin2 == end2) && (begin1 < end1))
temp[target++] = a[begin1++];
else if (a[begin1] < a[begin2])
temp[target++] = a[begin1++];
else
temp[target++] = a[begin2++];
}
while ((begin1!=end1) && (begin2!=end2));
}
__syncthreads();
a[threadIdx.x + (blockIdx.x*blockDim.x)] = temp[threadIdx.x];
__syncthreads();
i *= 2;
}
}
/* Function to merge the sorted blocks using merge sort */
__global__ void merge_blocks(int *a, int *temp, int sortedsize)
{
int id = blockIdx.x;
int begin1 = id * 2 * sortedsize;
int end1 = begin1 + sortedsize;
int begin2 = end1;
int end2 = begin2 + sortedsize;
int target = id * 2 * sortedsize;
do
{
if ((begin1 == end1) && (begin2 < end2))
temp[target++] = a[begin2++];
else if ((begin2 == end2) && (begin1 < end1))
temp[target++] = a[begin1++];
else if (a[begin1] < a[begin2])
temp[target++] = a[begin1++];
else
temp[target++] = a[begin2++];
}
while ((begin1!=end1) && (begin2!=end2));
}
int main()
{
int *a = (int *) malloc (NUM * sizeof (int));
int *dev_a, *dev_temp;
hipMalloc((void **) &dev_a, NUM*sizeof(int));
hipMalloc((void **) &dev_temp, NUM*sizeof(int));
for (int i = 0; i < NUM; i++)
{
a[i] = rand () % 10000;
}
/* timing */
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
/* timing */
hipMemcpy(dev_a, a, NUM*sizeof(int), hipMemcpyHostToDevice);
/* Sort the elements corresponding to the threads in each block */
hipLaunchKernelGGL(( sort_blocks), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, dev_a);
hipMemcpy(a, dev_a, NUM*sizeof(int), hipMemcpyDeviceToHost);
/* Merge the sorted blocks */
int blocks = NUM_BLOCKS/2;
int sortedsize = NUM_THREADS;
while (blocks > 0)
{
hipLaunchKernelGGL(( merge_blocks), dim3(blocks), dim3(1), 0, 0, dev_a, dev_temp, sortedsize);
hipMemcpy (dev_a, dev_temp, NUM*sizeof(int), hipMemcpyDeviceToDevice);
blocks /= 2;
sortedsize *= 2;
}
hipMemcpy(a, dev_a, NUM*sizeof(int), hipMemcpyDeviceToHost);
/* timing */
hipEventRecord (stop, 0);
hipEventSynchronize (stop);
hipEventElapsedTime (&time, start, stop);
hipEventDestroy (start);
hipEventDestroy (stop);
/* timing */
bool passed = true;
for(int i = 1; i < NUM; i++)
{
if (a [i-1] > a [i])
passed = false;
}
printf("\nTest %s\n", passed ? "PASSED" : "FAILED");
printf("Time : %f\n", time);
hipDeviceReset();
return 0;
}
| a0d5ca9314feab74db100156ea7b78b251eb5fdf.cu | #include <stdio.h>
#include <stdlib.h>
#define NUM 1048576
#define NUM_THREADS 512
#define NUM_BLOCKS 2048
/* Function to sort the elements of each thread block using merge sort */
__global__ void sort_blocks(int *a)
{
int i=2;
__shared__ int temp [NUM_THREADS];
while (i <= NUM_THREADS)
{
if ((threadIdx.x % i)==0)
{
int begin1 = threadIdx.x + (blockIdx.x * blockDim.x);
int end1 = begin1 + i/2;
int begin2 = end1;
int end2 = begin2 + i/2;
int target = threadIdx.x;
do
{
if ((begin1 == end1) && (begin2 < end2))
temp[target++] = a[begin2++];
else if ((begin2 == end2) && (begin1 < end1))
temp[target++] = a[begin1++];
else if (a[begin1] < a[begin2])
temp[target++] = a[begin1++];
else
temp[target++] = a[begin2++];
}
while ((begin1!=end1) && (begin2!=end2));
}
__syncthreads();
a[threadIdx.x + (blockIdx.x*blockDim.x)] = temp[threadIdx.x];
__syncthreads();
i *= 2;
}
}
/* Function to merge the sorted blocks using merge sort */
__global__ void merge_blocks(int *a, int *temp, int sortedsize)
{
int id = blockIdx.x;
int begin1 = id * 2 * sortedsize;
int end1 = begin1 + sortedsize;
int begin2 = end1;
int end2 = begin2 + sortedsize;
int target = id * 2 * sortedsize;
do
{
if ((begin1 == end1) && (begin2 < end2))
temp[target++] = a[begin2++];
else if ((begin2 == end2) && (begin1 < end1))
temp[target++] = a[begin1++];
else if (a[begin1] < a[begin2])
temp[target++] = a[begin1++];
else
temp[target++] = a[begin2++];
}
while ((begin1!=end1) && (begin2!=end2));
}
int main()
{
int *a = (int *) malloc (NUM * sizeof (int));
int *dev_a, *dev_temp;
cudaMalloc((void **) &dev_a, NUM*sizeof(int));
cudaMalloc((void **) &dev_temp, NUM*sizeof(int));
for (int i = 0; i < NUM; i++)
{
a[i] = rand () % 10000;
}
/* timing */
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
/* timing */
cudaMemcpy(dev_a, a, NUM*sizeof(int), cudaMemcpyHostToDevice);
/* Sort the elements corresponding to the threads in each block */
sort_blocks<<<NUM_BLOCKS, NUM_THREADS>>>(dev_a);
cudaMemcpy(a, dev_a, NUM*sizeof(int), cudaMemcpyDeviceToHost);
/* Merge the sorted blocks */
int blocks = NUM_BLOCKS/2;
int sortedsize = NUM_THREADS;
while (blocks > 0)
{
merge_blocks<<<blocks, 1>>>(dev_a, dev_temp, sortedsize);
cudaMemcpy (dev_a, dev_temp, NUM*sizeof(int), cudaMemcpyDeviceToDevice);
blocks /= 2;
sortedsize *= 2;
}
cudaMemcpy(a, dev_a, NUM*sizeof(int), cudaMemcpyDeviceToHost);
/* timing */
cudaEventRecord (stop, 0);
cudaEventSynchronize (stop);
cudaEventElapsedTime (&time, start, stop);
cudaEventDestroy (start);
cudaEventDestroy (stop);
/* timing */
bool passed = true;
for(int i = 1; i < NUM; i++)
{
if (a [i-1] > a [i])
passed = false;
}
printf("\nTest %s\n", passed ? "PASSED" : "FAILED");
printf("Time : %f\n", time);
cudaThreadExit();
return 0;
}
|
cd89823efd4101707b80cc64820f2600b0ab6718.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,int var_11,int var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float* var_21,float* var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float* var_33,float var_34,float var_35,float var_36,float var_37,float var_38) {
if (comp >= -1.1278E-36f * powf(var_2 * ceilf((var_3 - (+0.0f * +1.1754E14f))), (var_4 / var_5 / (-1.6095E34f * atanf(+1.4243E19f - var_6))))) {
if (comp <= var_7 + var_8 / -1.6767E10f * var_9 / (-0.0f + +1.9388E-41f)) {
for (int i=0; i < var_1; ++i) {
if (comp <= (+1.9397E-41f / asinf(+1.4536E-37f / var_10 * (+1.4471E-20f / (-0.0f + +1.6621E-35f))))) {
float tmp_1 = var_13 - (+0.0f + var_14);
comp = tmp_1 / var_15 * (-0.0f - var_16);
comp = var_17 / (var_18 + (var_19 + var_20));
for (int i=0; i < var_11; ++i) {
float tmp_2 = (+0.0f / (var_23 - -0.0f));
var_21[i] = +0.0f;
var_22[i] = (-0.0f / (var_24 + ceilf(+1.6357E-42f)));
comp += var_22[i] * var_21[i] - tmp_2 / (var_25 * (var_26 * (-1.9806E-42f + var_27)));
}
if (comp <= -1.3492E-36f * var_28 * (-1.8482E4f - var_29)) {
float tmp_3 = +0.0f;
float tmp_4 = -1.1884E-26f;
comp += tmp_4 - tmp_3 * (+1.2614E-35f / (+1.8342E-35f * var_30));
comp += (var_31 / var_32);
}
for (int i=0; i < var_12; ++i) {
comp += (+1.2738E-10f + +1.5006E14f);
comp = fabsf(var_34 * var_35 + -1.5396E-12f * var_36);
var_33[i] = -0.0f * (-1.0610E-35f / (-0.0f * var_37));
comp += var_33[i] + -1.5176E-36f / (var_38 + +1.4360E35f);
}
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
int tmp_12 = atoi(argv[12]);
int tmp_13 = atoi(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float* tmp_22 = initPointer( atof(argv[22]) );
float* tmp_23 = initPointer( atof(argv[23]) );
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
float tmp_31 = atof(argv[31]);
float tmp_32 = atof(argv[32]);
float tmp_33 = atof(argv[33]);
float* tmp_34 = initPointer( atof(argv[34]) );
float tmp_35 = atof(argv[35]);
float tmp_36 = atof(argv[36]);
float tmp_37 = atof(argv[37]);
float tmp_38 = atof(argv[38]);
float tmp_39 = atof(argv[39]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34,tmp_35,tmp_36,tmp_37,tmp_38,tmp_39);
hipDeviceSynchronize();
return 0;
}
| cd89823efd4101707b80cc64820f2600b0ab6718.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,int var_11,int var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float* var_21,float* var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float* var_33,float var_34,float var_35,float var_36,float var_37,float var_38) {
if (comp >= -1.1278E-36f * powf(var_2 * ceilf((var_3 - (+0.0f * +1.1754E14f))), (var_4 / var_5 / (-1.6095E34f * atanf(+1.4243E19f - var_6))))) {
if (comp <= var_7 + var_8 / -1.6767E10f * var_9 / (-0.0f + +1.9388E-41f)) {
for (int i=0; i < var_1; ++i) {
if (comp <= (+1.9397E-41f / asinf(+1.4536E-37f / var_10 * (+1.4471E-20f / (-0.0f + +1.6621E-35f))))) {
float tmp_1 = var_13 - (+0.0f + var_14);
comp = tmp_1 / var_15 * (-0.0f - var_16);
comp = var_17 / (var_18 + (var_19 + var_20));
for (int i=0; i < var_11; ++i) {
float tmp_2 = (+0.0f / (var_23 - -0.0f));
var_21[i] = +0.0f;
var_22[i] = (-0.0f / (var_24 + ceilf(+1.6357E-42f)));
comp += var_22[i] * var_21[i] - tmp_2 / (var_25 * (var_26 * (-1.9806E-42f + var_27)));
}
if (comp <= -1.3492E-36f * var_28 * (-1.8482E4f - var_29)) {
float tmp_3 = +0.0f;
float tmp_4 = -1.1884E-26f;
comp += tmp_4 - tmp_3 * (+1.2614E-35f / (+1.8342E-35f * var_30));
comp += (var_31 / var_32);
}
for (int i=0; i < var_12; ++i) {
comp += (+1.2738E-10f + +1.5006E14f);
comp = fabsf(var_34 * var_35 + -1.5396E-12f * var_36);
var_33[i] = -0.0f * (-1.0610E-35f / (-0.0f * var_37));
comp += var_33[i] + -1.5176E-36f / (var_38 + +1.4360E35f);
}
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
int tmp_12 = atoi(argv[12]);
int tmp_13 = atoi(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float* tmp_22 = initPointer( atof(argv[22]) );
float* tmp_23 = initPointer( atof(argv[23]) );
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
float tmp_31 = atof(argv[31]);
float tmp_32 = atof(argv[32]);
float tmp_33 = atof(argv[33]);
float* tmp_34 = initPointer( atof(argv[34]) );
float tmp_35 = atof(argv[35]);
float tmp_36 = atof(argv[36]);
float tmp_37 = atof(argv[37]);
float tmp_38 = atof(argv[38]);
float tmp_39 = atof(argv[39]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34,tmp_35,tmp_36,tmp_37,tmp_38,tmp_39);
cudaDeviceSynchronize();
return 0;
}
|
30f2202ea9f07587053fa99ef6a27bf620ee54b4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,int var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float var_33) {
if (comp <= fmodf((var_1 * -1.8254E-35f / -1.6689E35f / (var_2 * (var_3 / var_4))), -1.7343E21f)) {
float tmp_1 = var_6 * -1.6855E-36f;
float tmp_2 = +0.0f;
comp = tmp_2 * tmp_1 * var_7 - logf(var_8 - +1.1338E-23f);
comp = var_9 + (+0.0f * var_10 / var_11 * -1.3318E34f);
if (comp <= var_12 / var_13 - var_14 * acosf(var_15 + +1.3878E-42f * var_16 * sinf(fmodf(var_17 - var_18, (+1.3409E34f - var_19 / (-0.0f - powf(var_20 / (+1.5133E10f / var_21 + (var_22 + var_23)), -1.0736E34f))))))) {
comp = var_24 + floorf(acosf((var_25 / var_26)));
float tmp_3 = -1.1663E36f;
float tmp_4 = -1.2158E36f;
comp += tmp_4 + tmp_3 - sqrtf(var_27 - +1.5496E-41f);
}
for (int i=0; i < var_5; ++i) {
float tmp_5 = (var_28 - (-1.7661E29f - var_29));
comp += tmp_5 - var_30 + -1.1375E-20f / +1.5173E-19f;
comp += (+1.7534E35f / var_31 / var_32 * +1.1428E36f - -1.8270E36f);
comp += floorf((var_33 * -1.0072E-37f));
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
int tmp_6 = atoi(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
float tmp_31 = atof(argv[31]);
float tmp_32 = atof(argv[32]);
float tmp_33 = atof(argv[33]);
float tmp_34 = atof(argv[34]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34);
hipDeviceSynchronize();
return 0;
}
| 30f2202ea9f07587053fa99ef6a27bf620ee54b4.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,int var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float var_33) {
if (comp <= fmodf((var_1 * -1.8254E-35f / -1.6689E35f / (var_2 * (var_3 / var_4))), -1.7343E21f)) {
float tmp_1 = var_6 * -1.6855E-36f;
float tmp_2 = +0.0f;
comp = tmp_2 * tmp_1 * var_7 - logf(var_8 - +1.1338E-23f);
comp = var_9 + (+0.0f * var_10 / var_11 * -1.3318E34f);
if (comp <= var_12 / var_13 - var_14 * acosf(var_15 + +1.3878E-42f * var_16 * sinf(fmodf(var_17 - var_18, (+1.3409E34f - var_19 / (-0.0f - powf(var_20 / (+1.5133E10f / var_21 + (var_22 + var_23)), -1.0736E34f))))))) {
comp = var_24 + floorf(acosf((var_25 / var_26)));
float tmp_3 = -1.1663E36f;
float tmp_4 = -1.2158E36f;
comp += tmp_4 + tmp_3 - sqrtf(var_27 - +1.5496E-41f);
}
for (int i=0; i < var_5; ++i) {
float tmp_5 = (var_28 - (-1.7661E29f - var_29));
comp += tmp_5 - var_30 + -1.1375E-20f / +1.5173E-19f;
comp += (+1.7534E35f / var_31 / var_32 * +1.1428E36f - -1.8270E36f);
comp += floorf((var_33 * -1.0072E-37f));
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
int tmp_6 = atoi(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
float tmp_31 = atof(argv[31]);
float tmp_32 = atof(argv[32]);
float tmp_33 = atof(argv[33]);
float tmp_34 = atof(argv[34]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34);
cudaDeviceSynchronize();
return 0;
}
|
54d4ee331776a1e2a4f8b20e0ff85aab4b882238.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void matrixMul_kernel(float * A, float * B, float * C, int N)
{
int ROW = blockIdx.y * blockDim.y + threadIdx.y;
int COL = blockIdx.x * blockDim.x + threadIdx.x;
float tmpSum = 0;
if (ROW < N && COL < N)
{
// each thread computes one elem of the block sub-matrix
for (int i = 0; i < N; i++)
{
tmpSum += A[ROW * N + i] * B[i * N + COL];
}
// store the result inside the bounds check so threads outside the matrix do not write out of range
C[ROW * N + COL] = tmpSum;
}
} | 54d4ee331776a1e2a4f8b20e0ff85aab4b882238.cu | #include "includes.h"
__global__ void matrixMul_kernel(float * A, float * B, float * C, int N)
{
int ROW = blockIdx.y * blockDim.y + threadIdx.y;
int COL = blockIdx.x * blockDim.x + threadIdx.x;
float tmpSum = 0;
if (ROW < N && COL < N)
{
// each thread computes one elem of the block sub-matrix
for (int i = 0; i < N; i++)
{
tmpSum += A[ROW * N + i] * B[i * N + COL];
}
// store the result inside the bounds check so threads outside the matrix do not write out of range
C[ROW * N + COL] = tmpSum;
}
} |
4c5aebdfba7f956f97b01db336f8122f7365e276.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef ENABLE_CURD
#include<curd_lib_host.h>
#endif
/*
* Copyright (c) 2016 University of Cordoba and University of Illinois
* All rights reserved.
*
* Developed by: IMPACT Research Group
* University of Cordoba and University of Illinois
* http://impact.crhc.illinois.edu/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* with the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* > Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
* > Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimers in the
* documentation and/or other materials provided with the distribution.
* > Neither the names of IMPACT Research Group, University of Cordoba,
* University of Illinois nor the names of its contributors may be used
* to endorse or promote products derived from this Software without
* specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
* THE SOFTWARE.
*
*/
#define _CUDA_COMPILER_
#include "support/common.h"
#include "support/partitioner.h"
// CUDA kernel ------------------------------------------------------------------------------------------
__global__ void Histogram_kernel(int size, int bins, int n_tasks, float alpha, unsigned int *data,
unsigned int *histo
#ifdef CUDA_8_0
, int *worklist
#endif
) {
extern __shared__ unsigned int l_mem[];
unsigned int* l_histo = l_mem;
#ifdef CUDA_8_0
int* l_tmp = (int*)&l_histo[bins];
#endif
#ifdef CUDA_8_0
Partitioner p = partitioner_create(n_tasks, alpha, worklist, l_tmp);
#else
Partitioner p = partitioner_create(n_tasks, alpha);
#endif
// Block and thread index
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int bD = blockDim.x;
const int gD = gridDim.x;
// Sub-histograms initialization
for(int pos = tx; pos < bins; pos += bD) {
l_histo[pos] = 0;
}
__syncthreads(); // Intra-block synchronization
// Main loop
for(int i = gpu_first(&p); gpu_more(&p); i = gpu_next(&p)) {
// Global memory read
unsigned int d = data[i * bD + tx];
// Atomic vote in shared memory
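// (d * bins) >> 12 scales the sample down to a bin index; this assumes input values fit in 12 bits (0-4095)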
atomicAdd(&l_histo[((d * bins) >> 12)], 1);
}
__syncthreads(); // Intra-block synchronization
// Merge per-block histograms and write to global memory
for(int pos = tx; pos < bins; pos += bD) {
// Atomic addition in global memory
#ifdef CUDA_8_0
atomicAdd_system(histo + pos, l_histo[pos]);
#else
atomicAdd(histo + pos, l_histo[pos]);
#endif
}
}
hipError_t call_Histogram_kernel(int blocks, int threads, int size, int bins, int n_tasks, float alpha,
unsigned int *data, unsigned int *histo, int l_mem_size
#ifdef CUDA_8_0
, int* worklist
#endif
){
dim3 dimGrid(blocks);
dim3 dimBlock(threads);
#ifdef ENABLE_CURD
allocateReadWriteSets(dimGrid, dimBlock);
#endif
hipLaunchKernelGGL(( Histogram_kernel), dim3(dimGrid), dim3(dimBlock), l_mem_size, 0, size, bins, n_tasks, alpha,
data, histo
#ifdef CUDA_8_0
, worklist
#endif
);
#ifdef ENABLE_CURD
freeReadWriteSets(dimGrid, dimBlock);
#endif
hipError_t err = hipGetLastError();
return err;
}
| 4c5aebdfba7f956f97b01db336f8122f7365e276.cu | #ifdef ENABLE_CURD
#include<curd_lib_host.h>
#endif
/*
* Copyright (c) 2016 University of Cordoba and University of Illinois
* All rights reserved.
*
* Developed by: IMPACT Research Group
* University of Cordoba and University of Illinois
* http://impact.crhc.illinois.edu/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* with the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* > Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
* > Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimers in the
* documentation and/or other materials provided with the distribution.
* > Neither the names of IMPACT Research Group, University of Cordoba,
* University of Illinois nor the names of its contributors may be used
* to endorse or promote products derived from this Software without
* specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
* THE SOFTWARE.
*
*/
#define _CUDA_COMPILER_
#include "support/common.h"
#include "support/partitioner.h"
// CUDA kernel ------------------------------------------------------------------------------------------
__global__ void Histogram_kernel(int size, int bins, int n_tasks, float alpha, unsigned int *data,
unsigned int *histo
#ifdef CUDA_8_0
, int *worklist
#endif
) {
extern __shared__ unsigned int l_mem[];
unsigned int* l_histo = l_mem;
#ifdef CUDA_8_0
int* l_tmp = (int*)&l_histo[bins];
#endif
#ifdef CUDA_8_0
Partitioner p = partitioner_create(n_tasks, alpha, worklist, l_tmp);
#else
Partitioner p = partitioner_create(n_tasks, alpha);
#endif
// Block and thread index
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int bD = blockDim.x;
const int gD = gridDim.x;
// Sub-histograms initialization
for(int pos = tx; pos < bins; pos += bD) {
l_histo[pos] = 0;
}
__syncthreads(); // Intra-block synchronization
// Main loop
for(int i = gpu_first(&p); gpu_more(&p); i = gpu_next(&p)) {
// Global memory read
unsigned int d = data[i * bD + tx];
// Atomic vote in shared memory
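// (d * bins) >> 12 scales the sample down to a bin index; this assumes input values fit in 12 bits (0-4095)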
atomicAdd(&l_histo[((d * bins) >> 12)], 1);
}
__syncthreads(); // Intra-block synchronization
// Merge per-block histograms and write to global memory
for(int pos = tx; pos < bins; pos += bD) {
// Atomic addition in global memory
#ifdef CUDA_8_0
atomicAdd_system(histo + pos, l_histo[pos]);
#else
atomicAdd(histo + pos, l_histo[pos]);
#endif
}
}
cudaError_t call_Histogram_kernel(int blocks, int threads, int size, int bins, int n_tasks, float alpha,
unsigned int *data, unsigned int *histo, int l_mem_size
#ifdef CUDA_8_0
, int* worklist
#endif
){
dim3 dimGrid(blocks);
dim3 dimBlock(threads);
#ifdef ENABLE_CURD
allocateReadWriteSets(dimGrid, dimBlock);
#endif
Histogram_kernel<<<dimGrid, dimBlock, l_mem_size>>>(size, bins, n_tasks, alpha,
data, histo
#ifdef CUDA_8_0
, worklist
#endif
);
#ifdef ENABLE_CURD
freeReadWriteSets(dimGrid, dimBlock);
#endif
cudaError_t err = cudaGetLastError();
return err;
}
|
599afd5ab83d1508bba54ed47bf25740b390c60f.hip | // !!! This is a file automatically generated by hipify!!!
#include "GPU_Setup.h"
#include <stdio.h>
#include <thrust/host_vector.h>
void initOGL_CUDA()
{
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipGLSetGLDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
}
} | 599afd5ab83d1508bba54ed47bf25740b390c60f.cu | #include "GPU_Setup.h"
#include <stdio.h>
#include <thrust/host_vector.h>
void initOGL_CUDA()
{
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaGLSetGLDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
}
} |
6b1503154630e84016b85a1338b4d81f26143dc0.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <thrust/device_vector.h>
#include "matmul_processor.hpp"
using namespace thrust;
using namespace structured;
using namespace std;
int div_ceil(int numerator, int denominator);
template <>
void MatMulProc<float>::gpuMultMat(
const float* A, const float* B,
float* C, int batch, const float alpha, const float beta) {
host_vector<const float*> Aarray(batch);
host_vector<const float*> Barray(batch);
host_vector<float*> Carray(batch);
int lda = transA ? M : K;
int ldb = transB ? K : N;
hipblasOperation_t cuTransA = transA == transC ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB = transB == transC ? HIPBLAS_OP_N : HIPBLAS_OP_T;
for(int i=0; i<batch; i++){
Aarray[i] = A + M*K*i;
Barray[i] = B + K*N*i;
Carray[i] = C + M*N*i;
}
device_vector<const float*> devA(Aarray);
device_vector<const float*> devB(Barray);
device_vector<float*> devC(Carray);
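// Note: the batched GEMM below assumes column-major storage; when transC is false the operands are swapped so the
// transposed product (C^T = B^T * A^T) is computed instead. The exact meaning of transA/transB/transC is defined
// by the surrounding class, so this comment is only a reading aid.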
if(transC) CUBLAS_CHECK(
hipblasSgemmBatched(cublas_handle(), cuTransA, cuTransB, M, N, K, &alpha,
thrust::raw_pointer_cast(devA.data()), lda,
thrust::raw_pointer_cast(devB.data()), ldb, &beta,
thrust::raw_pointer_cast(devC.data()), M, batch));
else CUBLAS_CHECK(
hipblasSgemmBatched(cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha,
thrust::raw_pointer_cast(devB.data()), ldb,
thrust::raw_pointer_cast(devA.data()), lda, &beta,
thrust::raw_pointer_cast(devC.data()), N, batch));
}
template <>
void MatMulProc<double>::gpuMultMat(
const double* A, const double* B,
double* C, int batch, const double alpha, const double beta){
host_vector<const double*> Aarray(batch);
host_vector<const double*> Barray(batch);
host_vector<double*> Carray(batch);
int lda = transA ? M : K;
int ldb = transB ? K : N;
hipblasOperation_t cuTransA = transA == transC ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB = transB == transC ? HIPBLAS_OP_N : HIPBLAS_OP_T;
for(int i=0; i<batch; i++){
Aarray[i] = A + M*K*i;
Barray[i] = B + K*N*i;
Carray[i] = C + M*N*i;
}
device_vector<const double*> devA(Aarray);
device_vector<const double*> devB(Barray);
device_vector<double*> devC(Carray);
if(transC) CUBLAS_CHECK(
hipblasDgemmBatched(cublas_handle(), cuTransA, cuTransB, M, N, K, &alpha,
thrust::raw_pointer_cast(devA.data()), lda,
thrust::raw_pointer_cast(devB.data()), ldb, &beta,
thrust::raw_pointer_cast(devC.data()), M, batch));
else CUBLAS_CHECK(
hipblasDgemmBatched(cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha,
thrust::raw_pointer_cast(devB.data()), ldb,
thrust::raw_pointer_cast(devA.data()), lda, &beta,
thrust::raw_pointer_cast(devC.data()), N, batch));
}
template struct MatMulProc<float>;
template struct MatMulProc<double>;
| 6b1503154630e84016b85a1338b4d81f26143dc0.cu | #include <cstdlib>
#include <thrust/device_vector.h>
#include "matmul_processor.hpp"
using namespace thrust;
using namespace structured;
using namespace std;
int div_ceil(int numerator, int denominator);
template <>
void MatMulProc<float>::gpuMultMat(
const float* A, const float* B,
float* C, int batch, const float alpha, const float beta) {
host_vector<const float*> Aarray(batch);
host_vector<const float*> Barray(batch);
host_vector<float*> Carray(batch);
int lda = transA ? M : K;
int ldb = transB ? K : N;
cublasOperation_t cuTransA = transA == transC ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB = transB == transC ? CUBLAS_OP_N : CUBLAS_OP_T;
for(int i=0; i<batch; i++){
Aarray[i] = A + M*K*i;
Barray[i] = B + K*N*i;
Carray[i] = C + M*N*i;
}
device_vector<const float*> devA(Aarray);
device_vector<const float*> devB(Barray);
device_vector<float*> devC(Carray);
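// Note: the batched GEMM below assumes column-major storage; when transC is false the operands are swapped so the
// transposed product (C^T = B^T * A^T) is computed instead. The exact meaning of transA/transB/transC is defined
// by the surrounding class, so this comment is only a reading aid.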
if(transC) CUBLAS_CHECK(
cublasSgemmBatched(cublas_handle(), cuTransA, cuTransB, M, N, K, &alpha,
thrust::raw_pointer_cast(devA.data()), lda,
thrust::raw_pointer_cast(devB.data()), ldb, &beta,
thrust::raw_pointer_cast(devC.data()), M, batch));
else CUBLAS_CHECK(
cublasSgemmBatched(cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha,
thrust::raw_pointer_cast(devB.data()), ldb,
thrust::raw_pointer_cast(devA.data()), lda, &beta,
thrust::raw_pointer_cast(devC.data()), N, batch));
}
template <>
void MatMulProc<double>::gpuMultMat(
const double* A, const double* B,
double* C, int batch, const double alpha, const double beta){
host_vector<const double*> Aarray(batch);
host_vector<const double*> Barray(batch);
host_vector<double*> Carray(batch);
int lda = transA ? M : K;
int ldb = transB ? K : N;
cublasOperation_t cuTransA = transA == transC ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB = transB == transC ? CUBLAS_OP_N : CUBLAS_OP_T;
for(int i=0; i<batch; i++){
Aarray[i] = A + M*K*i;
Barray[i] = B + K*N*i;
Carray[i] = C + M*N*i;
}
device_vector<const double*> devA(Aarray);
device_vector<const double*> devB(Barray);
device_vector<double*> devC(Carray);
if(transC) CUBLAS_CHECK(
cublasDgemmBatched(cublas_handle(), cuTransA, cuTransB, M, N, K, &alpha,
thrust::raw_pointer_cast(devA.data()), lda,
thrust::raw_pointer_cast(devB.data()), ldb, &beta,
thrust::raw_pointer_cast(devC.data()), M, batch));
else CUBLAS_CHECK(
cublasDgemmBatched(cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha,
thrust::raw_pointer_cast(devB.data()), ldb,
thrust::raw_pointer_cast(devA.data()), lda, &beta,
thrust::raw_pointer_cast(devC.data()), N, batch));
}
template struct MatMulProc<float>;
template struct MatMulProc<double>;
|
2e37d926c73dd1907b45c8c9c20e4ac95f1b0c4e.hip | // !!! This is a file automatically generated by hipify!!!
#include "chainerx/cuda/cuda_device.h"
#include <cstdint>
#include <hip/hip_runtime.h>
#include "chainerx/arithmetic_ops.h"
#include "chainerx/array.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/elementwise.cuh"
#include "chainerx/cuda/float16.cuh"
#include "chainerx/cuda/kernel_regist.h"
#include "chainerx/cuda/numeric.cuh"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/kernels/arithmetic.h"
#include "chainerx/routines/arithmetic.h"
#include "chainerx/scalar.h"
namespace chainerx {
namespace cuda {
namespace {
CHAINERX_CUDA_REGISTER_ELTWISE_BINARY_KERNEL(AddKernel, { out = ArithmeticOps<CudaType>::Add(x1, x2); });
template <typename T>
struct AddASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Add(x1, x2); }
CudaType x2;
};
class CudaAddASKernel : public AddASKernel {
public:
void Call(const Array& x1, Scalar x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(AddASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(AddASKernel, CudaAddASKernel);
CHAINERX_CUDA_REGISTER_ELTWISE_DTYPE_BINARY_KERNEL(SubtractKernel, { out = ArithmeticOps<CudaType>::Subtract(x1, x2); }, VisitNumericDtype);
template <typename T>
struct SubtractASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Subtract(x1, x2); }
CudaType x2;
};
class CudaSubtractASKernel : public SubtractASKernel {
public:
void Call(const Array& x1, Scalar x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(SubtractASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(SubtractASKernel, CudaSubtractASKernel);
// TODO(sonots): support stream
CHAINERX_CUDA_REGISTER_ELTWISE_BINARY_KERNEL(MultiplyKernel, { out = ArithmeticOps<CudaType>::Multiply(x1, x2); });
template <typename T>
struct MultiplyASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Multiply(x1, x2); }
CudaType x2;
};
class CudaMultiplyASKernel : public MultiplyASKernel {
public:
void Call(const Array& x1, Scalar x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(MultiplyASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(MultiplyASKernel, CudaMultiplyASKernel);
// CUDA does not have std::div, which is used for the native backend.
template <typename T>
__device__ T FloorDivideImpl(T x, T y) {
if (y == 0) {
return 0;
}
return x / y - ((y >= 0 ? x % y : -(x % y)) < 0 ? 1 : 0);
}
__device__ int8_t FloorDivide(int8_t x, int8_t y) { return FloorDivideImpl(x, y); }
__device__ int16_t FloorDivide(int16_t x, int16_t y) { return FloorDivideImpl(x, y); }
__device__ int32_t FloorDivide(int32_t x, int32_t y) { return FloorDivideImpl(x, y); }
__device__ int64_t FloorDivide(int64_t x, int64_t y) { return FloorDivideImpl(x, y); }
__device__ uint8_t FloorDivide(uint8_t x, uint8_t y) {
if (y == 0) {
return 0;
}
return x / y;
}
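// fmod truncates toward zero; subtracting 1 when the remainder and the divisor have opposite signs rounds the quotient toward negative infinity (floor division)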
__device__ float FloorDivide(float x, float y) {
float rem = ::fmod(x, y);
return (x - rem) / y - ((rem < 0 && y > 0) || (rem > 0 && y < 0) ? 1 : 0);
}
__device__ double FloorDivide(double x, double y) {
double rem = ::fmod(x, y);
return (x - rem) / y - ((rem < 0 && y > 0) || (rem > 0 && y < 0) ? 1 : 0);
}
__device__ cuda::Float16 FloorDivide(cuda::Float16 x, cuda::Float16 y) {
return cuda::Float16{FloorDivide(static_cast<float>(x), static_cast<float>(y))};
}
CHAINERX_CUDA_REGISTER_ELTWISE_DTYPE_BINARY_KERNEL(FloorDivideKernel, { out = cuda::FloorDivide(x1, x2); }, VisitNumericDtype);
template <typename T>
struct FloorDivideASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = cuda::FloorDivide(x1, x2); }
CudaType x2;
};
class CudaFloorDivideASKernel : public FloorDivideASKernel {
public:
void Call(const Array& x1, Scalar x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(FloorDivideASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(FloorDivideASKernel, CudaFloorDivideASKernel);
template <typename T>
struct FloorDivideSAImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x2, CudaType& out) { out = cuda::FloorDivide(x1, x2); }
CudaType x1;
};
class CudaFloorDivideSAKernel : public FloorDivideSAKernel {
public:
void Call(Scalar x1, const Array& x2, const Array& out) override {
Device& device = x2.device();
device.CheckDevicesCompatible(x2, out);
const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(FloorDivideSAImpl<T>{static_cast<CudaType>(x1)}, x2_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(FloorDivideSAKernel, CudaFloorDivideSAKernel);
CHAINERX_CUDA_REGISTER_ELTWISE_BINARY_KERNEL(DivideKernel, { out = ArithmeticOps<CudaType>::Divide(x1, x2); });
template <typename T>
struct DivideASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Divide(x1, x2); }
CudaType x2;
};
class CudaDivideASKernel : public DivideASKernel {
public:
void Call(const Array& x1, Scalar x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(DivideASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(DivideASKernel, CudaDivideASKernel);
template <typename T>
struct DivideSAImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x2, CudaType& out) { out = ArithmeticOps<CudaType>::Divide(x1, x2); }
CudaType x1;
};
class CudaDivideSAKernel : public DivideSAKernel {
public:
void Call(Scalar x1, const Array& x2, const Array& out) override {
Device& device = x2.device();
device.CheckDevicesCompatible(x2, out);
const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(DivideSAImpl<T>{static_cast<CudaType>(x1)}, x2_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(DivideSAKernel, CudaDivideSAKernel);
CHAINERX_CUDA_REGISTER_ELTWISE_DTYPE_BINARY_KERNEL(PowerKernel, { out = cuda::Power(x1, x2); }, VisitNumericDtype);
template <typename T>
struct PowerASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = cuda::Power(x1, x2); }
CudaType x2;
};
class CudaPowerASKernel : public PowerASKernel {
public:
void Call(const Array& x1, Scalar x2, const Array& out) {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(PowerASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(PowerASKernel, CudaPowerASKernel);
template <typename T>
struct PowerSAImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x2, CudaType& out) { out = cuda::Power(x1, x2); }
CudaType x1;
};
class CudaPowerSAKernel : public PowerSAKernel {
public:
void Call(Scalar x1, const Array& x2, const Array& out) {
Device& device = x2.device();
device.CheckDevicesCompatible(x2, out);
const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(PowerSAImpl<T>{static_cast<CudaType>(x1)}, x2_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(PowerSAKernel, CudaPowerSAKernel);
// CUDA does not have std::mod, which is used for the native backend.
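// The raw remainder is adjusted below so that a non-zero result takes the sign of the divisor (floor-mod semantics, as in Python)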
template <typename T>
__device__ T ModSignedIntegerImpl(T x, T y) {
if (x == 0 || y == 0) {
return 0;
}
T ret = x % y;
if ((ret > 0 && y < 0) || (ret < 0 && y > 0)) {
return y + ret;
}
return ret;
}
__device__ int8_t Mod(int8_t x, int8_t y) { return ModSignedIntegerImpl(x, y); }
__device__ int16_t Mod(int16_t x, int16_t y) { return ModSignedIntegerImpl(x, y); }
__device__ int32_t Mod(int32_t x, int32_t y) { return ModSignedIntegerImpl(x, y); }
__device__ int64_t Mod(int64_t x, int64_t y) { return ModSignedIntegerImpl(x, y); }
__device__ uint8_t Mod(uint8_t x, uint8_t y) {
if (x == 0 || y == 0) {
return 0;
}
return x % y;
}
template <typename T>
__device__ T ModFloatImpl(T x, T y) {
if (y == 0) {
return NAN;
}
T ret = ::fmod(x, y);
if ((ret > 0 && y < 0) || (ret < 0 && y > 0)) {
return y + ret;
}
return ret;
}
__device__ double Mod(double x, double y) { return ModFloatImpl(x, y); }
__device__ float Mod(float x, float y) { return ModFloatImpl(x, y); }
__device__ cuda::Float16 Mod(cuda::Float16 x, cuda::Float16 y) { return cuda::Float16{Mod(static_cast<float>(x), static_cast<float>(y))}; }
CHAINERX_CUDA_REGISTER_ELTWISE_DTYPE_BINARY_KERNEL(ModAAKernel, { out = cuda::Mod(x1, x2); }, VisitNumericDtype);
template <typename T>
struct ModASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = cuda::Mod(x1, x2); }
CudaType x2;
};
class CudaModASKernel : public ModASKernel {
public:
void Call(const Array& x1, Scalar x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(ModASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(ModASKernel, CudaModASKernel);
template <typename T>
struct ModSAImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x2, CudaType& out) { out = cuda::Mod(x1, x2); }
CudaType x1;
};
class CudaModSAKernel : public ModSAKernel {
public:
void Call(Scalar x1, const Array& x2, const Array& out) override {
Device& device = x2.device();
device.CheckDevicesCompatible(x2, out);
const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(ModSAImpl<T>{static_cast<CudaType>(x1)}, x2_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(ModSAKernel, CudaModSAKernel);
CHAINERX_CUDA_REGISTER_ELTWISE_BINARY_KERNEL(FmodKernel, { out = cuda::Fmod(x1, x2); });
} // namespace
} // namespace cuda
} // namespace chainerx
| 2e37d926c73dd1907b45c8c9c20e4ac95f1b0c4e.cu | #include "chainerx/cuda/cuda_device.h"
#include <cstdint>
#include <cuda_runtime.h>
#include "chainerx/arithmetic_ops.h"
#include "chainerx/array.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/elementwise.cuh"
#include "chainerx/cuda/float16.cuh"
#include "chainerx/cuda/kernel_regist.h"
#include "chainerx/cuda/numeric.cuh"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/kernels/arithmetic.h"
#include "chainerx/routines/arithmetic.h"
#include "chainerx/scalar.h"
namespace chainerx {
namespace cuda {
namespace {
CHAINERX_CUDA_REGISTER_ELTWISE_BINARY_KERNEL(AddKernel, { out = ArithmeticOps<CudaType>::Add(x1, x2); });
template <typename T>
struct AddASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Add(x1, x2); }
CudaType x2;
};
class CudaAddASKernel : public AddASKernel {
public:
void Call(const Array& x1, Scalar x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(AddASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(AddASKernel, CudaAddASKernel);
CHAINERX_CUDA_REGISTER_ELTWISE_DTYPE_BINARY_KERNEL(SubtractKernel, { out = ArithmeticOps<CudaType>::Subtract(x1, x2); }, VisitNumericDtype);
template <typename T>
struct SubtractASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Subtract(x1, x2); }
CudaType x2;
};
class CudaSubtractASKernel : public SubtractASKernel {
public:
void Call(const Array& x1, Scalar x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(SubtractASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(SubtractASKernel, CudaSubtractASKernel);
// TODO(sonots): support stream
CHAINERX_CUDA_REGISTER_ELTWISE_BINARY_KERNEL(MultiplyKernel, { out = ArithmeticOps<CudaType>::Multiply(x1, x2); });
template <typename T>
struct MultiplyASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Multiply(x1, x2); }
CudaType x2;
};
class CudaMultiplyASKernel : public MultiplyASKernel {
public:
void Call(const Array& x1, Scalar x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(MultiplyASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(MultiplyASKernel, CudaMultiplyASKernel);
// CUDA does not have std::div, which is used for the native backend.
template <typename T>
__device__ T FloorDivideImpl(T x, T y) {
if (y == 0) {
return 0;
}
return x / y - ((y >= 0 ? x % y : -(x % y)) < 0 ? 1 : 0);
}
__device__ int8_t FloorDivide(int8_t x, int8_t y) { return FloorDivideImpl(x, y); }
__device__ int16_t FloorDivide(int16_t x, int16_t y) { return FloorDivideImpl(x, y); }
__device__ int32_t FloorDivide(int32_t x, int32_t y) { return FloorDivideImpl(x, y); }
__device__ int64_t FloorDivide(int64_t x, int64_t y) { return FloorDivideImpl(x, y); }
__device__ uint8_t FloorDivide(uint8_t x, uint8_t y) {
if (y == 0) {
return 0;
}
return x / y;
}
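// fmod truncates toward zero; subtracting 1 when the remainder and the divisor have opposite signs rounds the quotient toward negative infinity (floor division)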
__device__ float FloorDivide(float x, float y) {
float rem = std::fmod(x, y);
return (x - rem) / y - ((rem < 0 && y > 0) || (rem > 0 && y < 0) ? 1 : 0);
}
__device__ double FloorDivide(double x, double y) {
double rem = std::fmod(x, y);
return (x - rem) / y - ((rem < 0 && y > 0) || (rem > 0 && y < 0) ? 1 : 0);
}
__device__ cuda::Float16 FloorDivide(cuda::Float16 x, cuda::Float16 y) {
return cuda::Float16{FloorDivide(static_cast<float>(x), static_cast<float>(y))};
}
CHAINERX_CUDA_REGISTER_ELTWISE_DTYPE_BINARY_KERNEL(FloorDivideKernel, { out = cuda::FloorDivide(x1, x2); }, VisitNumericDtype);
template <typename T>
struct FloorDivideASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = cuda::FloorDivide(x1, x2); }
CudaType x2;
};
class CudaFloorDivideASKernel : public FloorDivideASKernel {
public:
void Call(const Array& x1, Scalar x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(FloorDivideASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(FloorDivideASKernel, CudaFloorDivideASKernel);
template <typename T>
struct FloorDivideSAImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x2, CudaType& out) { out = cuda::FloorDivide(x1, x2); }
CudaType x1;
};
class CudaFloorDivideSAKernel : public FloorDivideSAKernel {
public:
void Call(Scalar x1, const Array& x2, const Array& out) override {
Device& device = x2.device();
device.CheckDevicesCompatible(x2, out);
const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(FloorDivideSAImpl<T>{static_cast<CudaType>(x1)}, x2_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(FloorDivideSAKernel, CudaFloorDivideSAKernel);
CHAINERX_CUDA_REGISTER_ELTWISE_BINARY_KERNEL(DivideKernel, { out = ArithmeticOps<CudaType>::Divide(x1, x2); });
template <typename T>
struct DivideASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = ArithmeticOps<CudaType>::Divide(x1, x2); }
CudaType x2;
};
class CudaDivideASKernel : public DivideASKernel {
public:
void Call(const Array& x1, Scalar x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(DivideASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(DivideASKernel, CudaDivideASKernel);
template <typename T>
struct DivideSAImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x2, CudaType& out) { out = ArithmeticOps<CudaType>::Divide(x1, x2); }
CudaType x1;
};
class CudaDivideSAKernel : public DivideSAKernel {
public:
void Call(Scalar x1, const Array& x2, const Array& out) override {
Device& device = x2.device();
device.CheckDevicesCompatible(x2, out);
const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(DivideSAImpl<T>{static_cast<CudaType>(x1)}, x2_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(DivideSAKernel, CudaDivideSAKernel);
CHAINERX_CUDA_REGISTER_ELTWISE_DTYPE_BINARY_KERNEL(PowerKernel, { out = cuda::Power(x1, x2); }, VisitNumericDtype);
template <typename T>
struct PowerASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = cuda::Power(x1, x2); }
CudaType x2;
};
class CudaPowerASKernel : public PowerASKernel {
public:
void Call(const Array& x1, Scalar x2, const Array& out) {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(PowerASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(PowerASKernel, CudaPowerASKernel);
template <typename T>
struct PowerSAImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x2, CudaType& out) { out = cuda::Power(x1, x2); }
CudaType x1;
};
class CudaPowerSAKernel : public PowerSAKernel {
public:
void Call(Scalar x1, const Array& x2, const Array& out) {
Device& device = x2.device();
device.CheckDevicesCompatible(x2, out);
const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(PowerSAImpl<T>{static_cast<CudaType>(x1)}, x2_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(PowerSAKernel, CudaPowerSAKernel);
// CUDA does not have std::mod, which is used for the native backend.
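// The raw remainder is adjusted below so that a non-zero result takes the sign of the divisor (floor-mod semantics, as in Python)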
template <typename T>
__device__ T ModSignedIntegerImpl(T x, T y) {
if (x == 0 || y == 0) {
return 0;
}
T ret = x % y;
if ((ret > 0 && y < 0) || (ret < 0 && y > 0)) {
return y + ret;
}
return ret;
}
__device__ int8_t Mod(int8_t x, int8_t y) { return ModSignedIntegerImpl(x, y); }
__device__ int16_t Mod(int16_t x, int16_t y) { return ModSignedIntegerImpl(x, y); }
__device__ int32_t Mod(int32_t x, int32_t y) { return ModSignedIntegerImpl(x, y); }
__device__ int64_t Mod(int64_t x, int64_t y) { return ModSignedIntegerImpl(x, y); }
__device__ uint8_t Mod(uint8_t x, uint8_t y) {
if (x == 0 || y == 0) {
return 0;
}
return x % y;
}
template <typename T>
__device__ T ModFloatImpl(T x, T y) {
if (y == 0) {
return NAN;
}
T ret = std::fmod(x, y);
if ((ret > 0 && y < 0) || (ret < 0 && y > 0)) {
return y + ret;
}
return ret;
}
__device__ double Mod(double x, double y) { return ModFloatImpl(x, y); }
__device__ float Mod(float x, float y) { return ModFloatImpl(x, y); }
__device__ cuda::Float16 Mod(cuda::Float16 x, cuda::Float16 y) { return cuda::Float16{Mod(static_cast<float>(x), static_cast<float>(y))}; }
CHAINERX_CUDA_REGISTER_ELTWISE_DTYPE_BINARY_KERNEL(ModAAKernel, { out = cuda::Mod(x1, x2); }, VisitNumericDtype);
template <typename T>
struct ModASImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x1, CudaType& out) { out = cuda::Mod(x1, x2); }
CudaType x2;
};
class CudaModASKernel : public ModASKernel {
public:
void Call(const Array& x1, Scalar x2, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, out);
const Array& x1_cast = x1.dtype() == out.dtype() ? x1 : x1.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(ModASImpl<T>{static_cast<CudaType>(x2)}, x1_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(ModASKernel, CudaModASKernel);
template <typename T>
struct ModSAImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x2, CudaType& out) { out = cuda::Mod(x1, x2); }
CudaType x1;
};
class CudaModSAKernel : public ModSAKernel {
public:
void Call(Scalar x1, const Array& x2, const Array& out) override {
Device& device = x2.device();
device.CheckDevicesCompatible(x2, out);
const Array& x2_cast = x2.dtype() == out.dtype() ? x2 : x2.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(out.dtype(), [&](auto pt) {
using T = typename decltype(pt)::type;
using CudaType = cuda_internal::DataType<T>;
Elementwise<const T, T>(ModSAImpl<T>{static_cast<CudaType>(x1)}, x2_cast, out);
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(ModSAKernel, CudaModSAKernel);
CHAINERX_CUDA_REGISTER_ELTWISE_BINARY_KERNEL(FmodKernel, { out = cuda::Fmod(x1, x2); });
} // namespace
} // namespace cuda
} // namespace chainerx
|
b5cab9f5f4447989f752c34beaade6edc20a8b0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "helper_cuda.h"
__global__
void jacobi_seq_kernel(double * d_u, double * d_uo, double * d_f, int N, double delta2){
int i,j;
for(i = 1; i < N-1; i++){
for(j = 1; j < N-1; j++){
d_u[i*N + j] = 0.25*(d_uo[(i-1)*N + j] + d_uo[(i+1)*N + j] + d_uo[i*N + j+1] + d_uo[i*N + j-1] + delta2*d_f[i*N + j]);
}
}
}
__global__
void jacobi_single_kernel(double * d_u, double * d_uo, double * d_f, int N, double delta2){
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
int i = blockIdx.y * blockDim.y + threadIdx.y + 1;
if (j < N-1 && i < N-1){
d_u[i*N + j] = 0.25*(d_uo[(i-1)*N + j] + d_uo[(i+1)*N + j] + d_uo[i*N + j+1] + d_uo[i*N + j-1] + delta2*d_f[i*N + j]);
}
}
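// Multi-GPU variant: only rows 1..N/2-1 are updated (note the i < N/2 test), which suggests the grid is split in half across devices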
__global__
void jacobi_multi_kernel(double * d_u, double * d_uo, double * d_f, int N, double delta2){
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
//int i = blockIdx.y * blockDim.y + threadIdx.y + 1;
int i = blockIdx.y * blockDim.y + threadIdx.y + 1;
if (j < N-1 && i < N/2 && j > 0 && i > 0){
d_u[i*N + j] = 0.25*(d_uo[(i-1)*N + j] + d_uo[(i+1)*N + j] + d_uo[i*N + j+1] + d_uo[i*N + j-1] + delta2*d_f[i*N + j]);
}
}
| b5cab9f5f4447989f752c34beaade6edc20a8b0d.cu | #include "helper_cuda.h"
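// Sequential reference kernel: every launched thread sweeps the full interior, so it is presumably meant to be launched with a single thread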
__global__
void jacobi_seq_kernel(double * d_u, double * d_uo, double * d_f, int N, double delta2){
int i,j;
for(i = 1; i < N-1; i++){
for(j = 1; j < N-1; j++){
d_u[i*N + j] = 0.25*(d_uo[(i-1)*N + j] + d_uo[(i+1)*N + j] + d_uo[i*N + j+1] + d_uo[i*N + j-1] + delta2*d_f[i*N + j]);
}
}
}
__global__
void jacobi_single_kernel(double * d_u, double * d_uo, double * d_f, int N, double delta2){
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
int i = blockIdx.y * blockDim.y + threadIdx.y + 1;
if (j < N-1 && i < N-1){
d_u[i*N + j] = 0.25*(d_uo[(i-1)*N + j] + d_uo[(i+1)*N + j] + d_uo[i*N + j+1] + d_uo[i*N + j-1] + delta2*d_f[i*N + j]);
}
}
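// Multi-GPU variant: only rows 1..N/2-1 are updated (note the i < N/2 test), which suggests the grid is split in half across devices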
__global__
void jacobi_multi_kernel(double * d_u, double * d_uo, double * d_f, int N, double delta2){
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
//int i = blockIdx.y * blockDim.y + threadIdx.y + 1;
int i = blockIdx.y * blockDim.y + threadIdx.y + 1;
if (j < N-1 && i < N/2 && j > 0 && i > 0){
d_u[i*N + j] = 0.25*(d_uo[(i-1)*N + j] + d_uo[(i+1)*N + j] + d_uo[i*N + j+1] + d_uo[i*N + j-1] + delta2*d_f[i*N + j]);
}
}
|
08fa0d0ca66a7d90b28020d39222e4be9d6c0331.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_zvel_plus_2_top [3][2];
static int dims_update_halo_kernel2_zvel_plus_2_top_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_zvel_plus_2_top_gpu(ACC<double> &zvel0,
ACC<double> &zvel1,
const int* fields)
{
if(fields[FIELD_ZVEL0] == 1) zvel0(0,0,0) = zvel0(0,-2,0);
if(fields[FIELD_ZVEL1] == 1) zvel1(0,0,0) = zvel1(0,-2,0);
}
__global__ void ops_update_halo_kernel2_zvel_plus_2_top(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_plus_2_top[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_plus_2_top[0][0] * dims_update_halo_kernel2_zvel_plus_2_top[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_plus_2_top[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_plus_2_top[1][0] * dims_update_halo_kernel2_zvel_plus_2_top[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_zvel_plus_2_top[0][0], dims_update_halo_kernel2_zvel_plus_2_top[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_zvel_plus_2_top[1][0], dims_update_halo_kernel2_zvel_plus_2_top[1][1], arg1);
update_halo_kernel2_zvel_plus_2_top_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_2_top(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_zvel_plus_2_top_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,51)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(51,"update_halo_kernel2_zvel_plus_2_top");
OPS_kernels[51].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_zvel_plus_2_top_h[0][0] || ydim0 != dims_update_halo_kernel2_zvel_plus_2_top_h[0][1] || xdim1 != dims_update_halo_kernel2_zvel_plus_2_top_h[1][0] || ydim1 != dims_update_halo_kernel2_zvel_plus_2_top_h[1][1]) {
dims_update_halo_kernel2_zvel_plus_2_top_h[0][0] = xdim0;
dims_update_halo_kernel2_zvel_plus_2_top_h[0][1] = ydim0;
dims_update_halo_kernel2_zvel_plus_2_top_h[1][0] = xdim1;
dims_update_halo_kernel2_zvel_plus_2_top_h[1][1] = ydim1;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel2_zvel_plus_2_top, dims_update_halo_kernel2_zvel_plus_2_top_h, sizeof(dims_update_halo_kernel2_zvel_plus_2_top)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[51].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_plus_2_top), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[51].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[51].mpi_time += t2-t1;
OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_2_top(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 51;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 51;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_zvel_plus_2_top_execute;
if (OPS_diags > 1) {
ops_timing_realloc(51,"update_halo_kernel2_zvel_plus_2_top");
}
ops_enqueue_kernel(desc);
}
#endif
| 08fa0d0ca66a7d90b28020d39222e4be9d6c0331.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_zvel_plus_2_top [3][2];
static int dims_update_halo_kernel2_zvel_plus_2_top_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_zvel_plus_2_top_gpu(ACC<double> &zvel0,
ACC<double> &zvel1,
const int* fields)
{
if(fields[FIELD_ZVEL0] == 1) zvel0(0,0,0) = zvel0(0,-2,0);
if(fields[FIELD_ZVEL1] == 1) zvel1(0,0,0) = zvel1(0,-2,0);
}
__global__ void ops_update_halo_kernel2_zvel_plus_2_top(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_plus_2_top[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_plus_2_top[0][0] * dims_update_halo_kernel2_zvel_plus_2_top[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_plus_2_top[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_plus_2_top[1][0] * dims_update_halo_kernel2_zvel_plus_2_top[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_zvel_plus_2_top[0][0], dims_update_halo_kernel2_zvel_plus_2_top[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_zvel_plus_2_top[1][0], dims_update_halo_kernel2_zvel_plus_2_top[1][1], arg1);
update_halo_kernel2_zvel_plus_2_top_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_2_top(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_zvel_plus_2_top_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,51)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(51,"update_halo_kernel2_zvel_plus_2_top");
OPS_kernels[51].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_zvel_plus_2_top_h[0][0] || ydim0 != dims_update_halo_kernel2_zvel_plus_2_top_h[0][1] || xdim1 != dims_update_halo_kernel2_zvel_plus_2_top_h[1][0] || ydim1 != dims_update_halo_kernel2_zvel_plus_2_top_h[1][1]) {
dims_update_halo_kernel2_zvel_plus_2_top_h[0][0] = xdim0;
dims_update_halo_kernel2_zvel_plus_2_top_h[0][1] = ydim0;
dims_update_halo_kernel2_zvel_plus_2_top_h[1][0] = xdim1;
dims_update_halo_kernel2_zvel_plus_2_top_h[1][1] = ydim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel2_zvel_plus_2_top, dims_update_halo_kernel2_zvel_plus_2_top_h, sizeof(dims_update_halo_kernel2_zvel_plus_2_top)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[51].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel2_zvel_plus_2_top<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[51].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[51].mpi_time += t2-t1;
OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_2_top(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 51;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 51;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_zvel_plus_2_top_execute;
if (OPS_diags > 1) {
ops_timing_realloc(51,"update_halo_kernel2_zvel_plus_2_top");
}
ops_enqueue_kernel(desc);
}
#endif
|
6ea77f7e4be818b34f4ea5149eeab2872bea7cb0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2011-2013 NVIDIA Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of NVIDIA Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* release 1.1:
*
* o added smatinv_batch() for batched inversion of float matrices
* o added cmatinv_batch() for batched inversion of float-complex matrices
* o added special kernels for faster processing of very small matrices
* o added tuning for sm_35 to the configuration template class
*
*/
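/*
 * Editor's note (added, hedged): the entry points mentioned above are declared
 * in inverse.h. Assuming the prototype is
 *     int smatinv_batch(float *A, float *Ainv, int n, int batch);
 * (and analogously cmatinv_batch for hipComplex data; check inverse.h for the
 * exact signature), a host-side call could look roughly like this, with A_d
 * and Ainv_d holding `batch` contiguous n-by-n matrices in device memory:
 *
 *     float *A_d, *Ainv_d;
 *     hipMalloc((void **)&A_d,    (size_t)batch * n * n * sizeof(A_d[0]));
 *     hipMalloc((void **)&Ainv_d, (size_t)batch * n * n * sizeof(Ainv_d[0]));
 *     // ... fill A_d with the input matrices ...
 *     int err = smatinv_batch(A_d, Ainv_d, n, batch);  // assumed: 0 on success
 */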
#include <stdio.h>
#include "hip/hip_complex.h"
#include "inverse.h"
#include "operations.h"
#define GRID_DIM_LIMIT (65520)
#define ARCH_SM13 (0)
#define ARCH_SM20 (1)
#define ARCH_SM30 (2)
#define ARCH_SM35 (3)
#if defined(KEPLER2)
#define GPU_ARCH (ARCH_SM35)
#elif defined(FERMI) || defined(KEPLER1)
/* FIXME: This is a hack: instead of setting up tuning parameters for KEPLER1
platforms we simply re-use the Fermi settings. This very likely leads to
suboptimal performance.
*/
#define GPU_ARCH (ARCH_SM20)
#else
#define GPU_ARCH (ARCH_SM13)
#endif
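/*
 * Editor's note (added): GPU_ARCH is fixed at compile time by the macros
 * tested above, so the tuning tables below are selected entirely by the build
 * configuration. As an assumed example of how this would be driven (the exact
 * build flags are not part of this file): compiling with -DKEPLER2 selects the
 * sm_35 tables, -DFERMI or -DKEPLER1 selects the sm_2x tables, and building
 * with none of these macros falls back to the sm_13 configuration.
 */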
/* Poor man's typeid */
template <typename T> __device__ int isDoubleComplex();
template <> __device__ int isDoubleComplex<float>()            { return 0; }
template <> __device__ int isDoubleComplex<double>()           { return 0; }
template <> __device__ int isDoubleComplex<hipComplex>()       { return 0; }
template <> __device__ int isDoubleComplex<hipDoubleComplex>() { return 1; }
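/*
 * Editor's note (added, illustrative only): the specializations above act as a
 * minimal type trait, so generic device code can branch on the value type
 * without RTTI. The helper below is a hypothetical usage sketch, not a
 * function from the original source.
 */
template <typename T>
__device__ __forceinline__ int exampleNeedsDoubleComplexPath(void)
{
    /* evaluates to 1 only for hipDoubleComplex; 0 for float, double, hipComplex */
    return isDoubleComplex<T>();
}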
template <typename T, int arch>
class config {
public:
};
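/*
 * Editor's note (added, an inference from the naming rather than a statement
 * by the original authors): the primary template is intentionally empty; all
 * tuning data lives in the per-type, per-architecture specializations below.
 * Reading of the knobs, as far as it can be inferred here:
 *   gje3DimX_NN       : thread-block x-dimension used by the Gauss-Jordan
 *                       elimination kernel for an NN x NN matrix (-1 marks
 *                       sizes outside the supported range)
 *   gje3Pad_NN        : shared-memory row padding for the NN x NN case
 *   gje3SrchThrd_NN   : number of threads used in the pivot search
 *   matInvKxKMinBatch : minimum batch size at which the specialized small
 *                       matrix kernels are preferred
 * Launch code would read these as compile-time constants, for example
 *   config<double,GPU_ARCH>::gje3DimX_09 (a hypothetical usage line).
 */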
template<> class config<float,ARCH_SM35> {
public:
typedef float absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim =109 };
enum { gje3MinBlks = 1 };
enum { gje3MaxThrds = 2048 }; /* sm_35, 32 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 5 };
enum { gje3DimX_06 = 3 };
enum { gje3DimX_07 = 7 };
enum { gje3DimX_08 = 8 };
enum { gje3DimX_09 = 9 };
enum { gje3DimX_10 = 5 };
enum { gje3DimX_11 = 6 };
enum { gje3DimX_12 = 4 };
enum { gje3DimX_13 = 7 };
enum { gje3DimX_14 = 7 };
enum { gje3DimX_15 = 5 };
enum { gje3DimX_16 = 8 };
enum { gje3DimX_17 = 5 };
enum { gje3DimX_18 = 5 };
enum { gje3DimX_19 = 5 };
enum { gje3DimX_20 = 4 };
enum { gje3DimX_21 = 6 };
enum { gje3DimX_22 = 2 };
enum { gje3DimX_23 = 4 };
enum { gje3DimX_24 = 4 };
enum { gje3DimX_25 = 5 };
enum { gje3DimX_26 = 2 };
enum { gje3DimX_27 = 4 };
enum { gje3DimX_28 = 4 };
enum { gje3DimX_29 = 5 };
enum { gje3DimX_30 = 5 };
enum { gje3DimX_31 = 4 };
enum { gje3DimX_32 = 4 };
enum { gje3DimX_33 = 3 };
enum { gje3DimX_34 = 6 };
enum { gje3DimX_35 = 3 };
enum { gje3DimX_36 = 4 };
enum { gje3DimX_37 = 5 };
enum { gje3DimX_38 = 6 };
enum { gje3DimX_39 = 7 };
enum { gje3DimX_40 = 7 };
enum { gje3DimX_41 = 6 };
enum { gje3DimX_42 = 6 };
enum { gje3DimX_43 = 6 };
enum { gje3DimX_44 = 4 };
enum { gje3DimX_45 = 7 };
enum { gje3DimX_46 = 8 };
enum { gje3DimX_47 = 8 };
enum { gje3DimX_48 = 8 };
enum { gje3DimX_49 = 10 };
enum { gje3DimX_50 = 10 };
enum { gje3DimX_51 = 8 };
enum { gje3DimX_52 = 4 };
enum { gje3DimX_53 = 9 };
enum { gje3DimX_54 = 7 };
enum { gje3DimX_55 = 11 };
enum { gje3DimX_56 = 8 };
enum { gje3DimX_57 = 10 };
enum { gje3DimX_58 = 10 };
enum { gje3DimX_59 = 9 };
enum { gje3DimX_60 = 8 };
enum { gje3DimX_61 = 6 };
enum { gje3DimX_62 = 6 };
enum { gje3DimX_63 = 11 };
enum { gje3DimX_64 = 12 };
enum { gje3DimX_65 = 12 };
enum { gje3DimX_66 = 11 };
enum { gje3DimX_67 = 8 };
enum { gje3DimX_68 = 8 };
enum { gje3DimX_69 = 5 };
enum { gje3DimX_70 = 6 };
enum { gje3DimX_71 = 7 };
enum { gje3DimX_72 = 9 };
enum { gje3DimX_73 = 9 };
enum { gje3DimX_74 = 10 };
enum { gje3DimX_75 = 11 };
enum { gje3DimX_76 = 12 };
enum { gje3DimX_77 = 9 };
enum { gje3DimX_78 = 12 };
enum { gje3DimX_79 = 12 };
enum { gje3DimX_80 = 10 };
enum { gje3DimX_81 = 12 };
enum { gje3DimX_82 = 12 };
enum { gje3DimX_83 = 12 };
enum { gje3DimX_84 = 12 };
enum { gje3DimX_85 = 11 };
enum { gje3DimX_86 = 11 };
enum { gje3DimX_87 = 11 };
enum { gje3DimX_88 = 11 };
enum { gje3DimX_89 = 10 };
enum { gje3DimX_90 = 10 };
enum { gje3DimX_91 = 11 };
enum { gje3DimX_92 = 11 };
enum { gje3DimX_93 = 11 };
enum { gje3DimX_94 = 10 };
enum { gje3DimX_95 = 9 };
enum { gje3DimX_96 = 8 };
enum { gje3DimX_97 = 10 };
enum { gje3DimX_98 = 10 };
enum { gje3DimX_99 = 9 };
enum { gje3DimX_100 = 10 };
enum { gje3DimX_101 = 10 };
enum { gje3DimX_102 = 6 };
enum { gje3DimX_103 = 7 };
enum { gje3DimX_104 = 9 };
enum { gje3DimX_105 = 9 };
enum { gje3DimX_106 = 9 };
enum { gje3DimX_107 = 9 };
enum { gje3DimX_108 = 9 };
enum { gje3DimX_109 = 9 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 0 };
enum { gje3Pad_03 = 0 };
enum { gje3Pad_04 = 0 };
enum { gje3Pad_05 = 0 };
enum { gje3Pad_06 = 0 };
enum { gje3Pad_07 = 0 };
enum { gje3Pad_08 = 1 };
enum { gje3Pad_09 = 5 };
enum { gje3Pad_10 = 0 };
enum { gje3Pad_11 = 1 };
enum { gje3Pad_12 = 0 };
enum { gje3Pad_13 = 1 };
enum { gje3Pad_14 = 1 };
enum { gje3Pad_15 = 5 };
enum { gje3Pad_16 = 1 };
enum { gje3Pad_17 = 3 };
enum { gje3Pad_18 = 5 };
enum { gje3Pad_19 = 4 };
enum { gje3Pad_20 = 0 };
enum { gje3Pad_21 = 5 };
enum { gje3Pad_22 = 4 };
enum { gje3Pad_23 = 5 };
enum { gje3Pad_24 = 4 };
enum { gje3Pad_25 = 2 };
enum { gje3Pad_26 = 0 };
enum { gje3Pad_27 = 1 };
enum { gje3Pad_28 = 0 };
enum { gje3Pad_29 = 1 };
enum { gje3Pad_30 = 4 };
enum { gje3Pad_31 = 5 };
enum { gje3Pad_32 = 4 };
enum { gje3Pad_33 = 2 };
enum { gje3Pad_34 = 4 };
enum { gje3Pad_35 = 0 };
enum { gje3Pad_36 = 0 };
enum { gje3Pad_37 = 0 };
enum { gje3Pad_38 = 0 };
enum { gje3Pad_39 = 0 };
enum { gje3Pad_40 = 1 };
enum { gje3Pad_41 = 4 };
enum { gje3Pad_42 = 3 };
enum { gje3Pad_43 = 2 };
enum { gje3Pad_44 = 0 };
enum { gje3Pad_45 = 1 };
enum { gje3Pad_46 = 4 };
enum { gje3Pad_47 = 2 };
enum { gje3Pad_48 = 4 };
enum { gje3Pad_49 = 4 };
enum { gje3Pad_50 = 3 };
enum { gje3Pad_51 = 5 };
enum { gje3Pad_52 = 0 };
enum { gje3Pad_53 = 0 };
enum { gje3Pad_54 = 0 };
enum { gje3Pad_55 = 4 };
enum { gje3Pad_56 = 1 };
enum { gje3Pad_57 = 1 };
enum { gje3Pad_58 = 1 };
enum { gje3Pad_59 = 0 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 0 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 5 };
enum { gje3Pad_64 = 5 };
enum { gje3Pad_65 = 5 };
enum { gje3Pad_66 = 4 };
enum { gje3Pad_67 = 5 };
enum { gje3Pad_68 = 4 };
enum { gje3Pad_69 = 0 };
enum { gje3Pad_70 = 0 };
enum { gje3Pad_71 = 0 };
enum { gje3Pad_72 = 1 };
enum { gje3Pad_73 = 0 };
enum { gje3Pad_74 = 0 };
enum { gje3Pad_75 = 0 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 5 };
enum { gje3Pad_79 = 5 };
enum { gje3Pad_80 = 5 };
enum { gje3Pad_81 = 5 };
enum { gje3Pad_82 = 3 };
enum { gje3Pad_83 = 2 };
enum { gje3Pad_84 = 2 };
enum { gje3Pad_85 = 3 };
enum { gje3Pad_86 = 1 };
enum { gje3Pad_87 = 2 };
enum { gje3Pad_88 = 1 };
enum { gje3Pad_89 = 1 };
enum { gje3Pad_90 = 1 };
enum { gje3Pad_91 = 0 };
enum { gje3Pad_92 = 0 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 0 };
enum { gje3Pad_95 = 5 };
enum { gje3Pad_96 = 4 };
enum { gje3Pad_97 = 5 };
enum { gje3Pad_98 = 5 };
enum { gje3Pad_99 = 2 };
enum { gje3Pad_100 = 5 };
enum { gje3Pad_101 = 5 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 0 };
enum { gje3Pad_104 = 1 };
enum { gje3Pad_105 = 0 };
enum { gje3Pad_106 = 2 };
enum { gje3Pad_107 = 2 };
enum { gje3Pad_108 = 2 };
enum { gje3Pad_109 = 1 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 1 };
enum { gje3SrchThrd_03 = 1 };
enum { gje3SrchThrd_04 = 1 };
enum { gje3SrchThrd_05 = 1 };
enum { gje3SrchThrd_06 = 1 };
enum { gje3SrchThrd_07 = 1 };
enum { gje3SrchThrd_08 = 2 };
enum { gje3SrchThrd_09 = 2 };
enum { gje3SrchThrd_10 = 2 };
enum { gje3SrchThrd_11 = 2 };
enum { gje3SrchThrd_12 = 2 };
enum { gje3SrchThrd_13 = 2 };
enum { gje3SrchThrd_14 = 2 };
enum { gje3SrchThrd_15 = 2 };
enum { gje3SrchThrd_16 = 2 };
enum { gje3SrchThrd_17 = 2 };
enum { gje3SrchThrd_18 = 2 };
enum { gje3SrchThrd_19 = 2 };
enum { gje3SrchThrd_20 = 2 };
enum { gje3SrchThrd_21 = 2 };
enum { gje3SrchThrd_22 = 2 };
enum { gje3SrchThrd_23 = 2 };
enum { gje3SrchThrd_24 = 2 };
enum { gje3SrchThrd_25 = 2 };
enum { gje3SrchThrd_26 = 2 };
enum { gje3SrchThrd_27 = 2 };
enum { gje3SrchThrd_28 = 2 };
enum { gje3SrchThrd_29 = 2 };
enum { gje3SrchThrd_30 = 2 };
enum { gje3SrchThrd_31 = 2 };
enum { gje3SrchThrd_32 = 2 };
enum { gje3SrchThrd_33 = 2 };
enum { gje3SrchThrd_34 = 2 };
enum { gje3SrchThrd_35 = 2 };
enum { gje3SrchThrd_36 = 2 };
enum { gje3SrchThrd_37 = 2 };
enum { gje3SrchThrd_38 = 2 };
enum { gje3SrchThrd_39 = 2 };
enum { gje3SrchThrd_40 = 2 };
enum { gje3SrchThrd_41 = 2 };
enum { gje3SrchThrd_42 = 2 };
enum { gje3SrchThrd_43 = 2 };
enum { gje3SrchThrd_44 = 2 };
enum { gje3SrchThrd_45 = 2 };
enum { gje3SrchThrd_46 = 2 };
enum { gje3SrchThrd_47 = 2 };
enum { gje3SrchThrd_48 = 2 };
enum { gje3SrchThrd_49 = 4 };
enum { gje3SrchThrd_50 = 4 };
enum { gje3SrchThrd_51 = 4 };
enum { gje3SrchThrd_52 = 4 };
enum { gje3SrchThrd_53 = 4 };
enum { gje3SrchThrd_54 = 4 };
enum { gje3SrchThrd_55 = 4 };
enum { gje3SrchThrd_56 = 4 };
enum { gje3SrchThrd_57 = 4 };
enum { gje3SrchThrd_58 = 4 };
enum { gje3SrchThrd_59 = 4 };
enum { gje3SrchThrd_60 = 4 };
enum { gje3SrchThrd_61 = 4 };
enum { gje3SrchThrd_62 = 4 };
enum { gje3SrchThrd_63 = 4 };
enum { gje3SrchThrd_64 = 4 };
enum { gje3SrchThrd_65 = 4 };
enum { gje3SrchThrd_66 = 4 };
enum { gje3SrchThrd_67 = 4 };
enum { gje3SrchThrd_68 = 4 };
enum { gje3SrchThrd_69 = 4 };
enum { gje3SrchThrd_70 = 4 };
enum { gje3SrchThrd_71 = 4 };
enum { gje3SrchThrd_72 = 4 };
enum { gje3SrchThrd_73 = 4 };
enum { gje3SrchThrd_74 = 4 };
enum { gje3SrchThrd_75 = 4 };
enum { gje3SrchThrd_76 = 4 };
enum { gje3SrchThrd_77 = 4 };
enum { gje3SrchThrd_78 = 4 };
enum { gje3SrchThrd_79 = 4 };
enum { gje3SrchThrd_80 = 4 };
enum { gje3SrchThrd_81 = 4 };
enum { gje3SrchThrd_82 = 4 };
enum { gje3SrchThrd_83 = 4 };
enum { gje3SrchThrd_84 = 4 };
enum { gje3SrchThrd_85 = 4 };
enum { gje3SrchThrd_86 = 4 };
enum { gje3SrchThrd_87 = 4 };
enum { gje3SrchThrd_88 = 4 };
enum { gje3SrchThrd_89 = 4 };
enum { gje3SrchThrd_90 = 4 };
enum { gje3SrchThrd_91 = 4 };
enum { gje3SrchThrd_92 = 4 };
enum { gje3SrchThrd_93 = 4 };
enum { gje3SrchThrd_94 = 4 };
enum { gje3SrchThrd_95 = 4 };
enum { gje3SrchThrd_96 = 4 };
enum { gje3SrchThrd_97 = 4 };
enum { gje3SrchThrd_98 = 4 };
enum { gje3SrchThrd_99 = 4 };
enum { gje3SrchThrd_100 = 4 };
enum { gje3SrchThrd_101 = 4 };
enum { gje3SrchThrd_102 = 4 };
enum { gje3SrchThrd_103 = 4 };
enum { gje3SrchThrd_104 = 4 };
enum { gje3SrchThrd_105 = 4 };
enum { gje3SrchThrd_106 = 4 };
enum { gje3SrchThrd_107 = 4 };
enum { gje3SrchThrd_108 = 4 };
enum { gje3SrchThrd_109 = 4 };
enum { matInv2x2MinBatch = 1200 };
enum { matInv3x3MinBatch = 1000 };
enum { matInv4x4MinBatch = 900 };
enum { matInv5x5MinBatch = 900 };
enum { matInv6x6MinBatch = 900 };
enum { matInv7x7MinBatch = 1000 };
enum { matInv8x8MinBatch = 1000 };
enum { matInv9x9MinBatch = 1000 };
enum { matInv10x10MinBatch= 1000 };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 10 };
};
template<> class config<double,ARCH_SM35> {
public:
typedef double absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim = 77 };
enum { gje3MinBlks = 1 };
enum { gje3MaxThrds = 2048 }; /* sm_35, 32 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 5 };
enum { gje3DimX_06 = 6 };
enum { gje3DimX_07 = 7 };
enum { gje3DimX_08 = 8 };
enum { gje3DimX_09 = 9 };
enum { gje3DimX_10 = 5 };
enum { gje3DimX_11 = 11 };
enum { gje3DimX_12 = 8 };
enum { gje3DimX_13 = 7 };
enum { gje3DimX_14 = 7 };
enum { gje3DimX_15 = 5 };
enum { gje3DimX_16 = 8 };
enum { gje3DimX_17 = 5 };
enum { gje3DimX_18 = 5 };
enum { gje3DimX_19 = 5 };
enum { gje3DimX_20 = 8 };
enum { gje3DimX_21 = 7 };
enum { gje3DimX_22 = 4 };
enum { gje3DimX_23 = 8 };
enum { gje3DimX_24 = 8 };
enum { gje3DimX_25 = 5 };
enum { gje3DimX_26 = 6 };
enum { gje3DimX_27 = 7 };
enum { gje3DimX_28 = 8 };
enum { gje3DimX_29 = 8 };
enum { gje3DimX_30 = 6 };
enum { gje3DimX_31 = 8 };
enum { gje3DimX_32 = 8 };
enum { gje3DimX_33 = 11 };
enum { gje3DimX_34 = 6 };
enum { gje3DimX_35 = 9 };
enum { gje3DimX_36 = 8 };
enum { gje3DimX_37 = 10 };
enum { gje3DimX_38 = 10 };
enum { gje3DimX_39 = 8 };
enum { gje3DimX_40 = 10 };
enum { gje3DimX_41 = 9 };
enum { gje3DimX_42 = 10 };
enum { gje3DimX_43 = 9 };
enum { gje3DimX_44 = 8 };
enum { gje3DimX_45 = 9 };
enum { gje3DimX_46 = 12 };
enum { gje3DimX_47 = 12 };
enum { gje3DimX_48 = 12 };
enum { gje3DimX_49 = 12 };
enum { gje3DimX_50 = 12 };
enum { gje3DimX_51 = 9 };
enum { gje3DimX_52 = 8 };
enum { gje3DimX_53 = 11 };
enum { gje3DimX_54 = 12 };
enum { gje3DimX_55 = 11 };
enum { gje3DimX_56 = 12 };
enum { gje3DimX_57 = 11 };
enum { gje3DimX_58 = 12 };
enum { gje3DimX_59 = 12 };
enum { gje3DimX_60 = 12 };
enum { gje3DimX_61 = 12 };
enum { gje3DimX_62 = 12 };
enum { gje3DimX_63 = 11 };
enum { gje3DimX_64 = 12 };
enum { gje3DimX_65 = 12 };
enum { gje3DimX_66 = 12 };
enum { gje3DimX_67 = 12 };
enum { gje3DimX_68 = 12 };
enum { gje3DimX_69 = 12 };
enum { gje3DimX_70 = 12 };
enum { gje3DimX_71 = 9 };
enum { gje3DimX_72 = 9 };
enum { gje3DimX_73 = 11 };
enum { gje3DimX_74 = 10 };
enum { gje3DimX_75 = 11 };
enum { gje3DimX_76 = 12 };
enum { gje3DimX_77 = 11 };
enum { gje3DimX_78 = -1 };
enum { gje3DimX_79 = -1 };
enum { gje3DimX_80 = -1 };
enum { gje3DimX_81 = -1 };
enum { gje3DimX_82 = -1 };
enum { gje3DimX_83 = -1 };
enum { gje3DimX_84 = -1 };
enum { gje3DimX_85 = -1 };
enum { gje3DimX_86 = -1 };
enum { gje3DimX_87 = -1 };
enum { gje3DimX_88 = -1 };
enum { gje3DimX_89 = -1 };
enum { gje3DimX_90 = -1 };
enum { gje3DimX_91 = -1 };
enum { gje3DimX_92 = -1 };
enum { gje3DimX_93 = -1 };
enum { gje3DimX_94 = -1 };
enum { gje3DimX_95 = -1 };
enum { gje3DimX_96 = -1 };
enum { gje3DimX_97 = -1 };
enum { gje3DimX_98 = -1 };
enum { gje3DimX_99 = -1 };
enum { gje3DimX_100 = -1 };
enum { gje3DimX_101 = -1 };
enum { gje3DimX_102 = -1 };
enum { gje3DimX_103 = -1 };
enum { gje3DimX_104 = -1 };
enum { gje3DimX_105 = -1 };
enum { gje3DimX_106 = -1 };
enum { gje3DimX_107 = -1 };
enum { gje3DimX_108 = -1 };
enum { gje3DimX_109 = -1 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 0 };
enum { gje3Pad_03 = 0 };
enum { gje3Pad_04 = 0 };
enum { gje3Pad_05 = 0 };
enum { gje3Pad_06 = 0 };
enum { gje3Pad_07 = 0 };
enum { gje3Pad_08 = 1 };
enum { gje3Pad_09 = 0 };
enum { gje3Pad_10 = 0 };
enum { gje3Pad_11 = 0 };
enum { gje3Pad_12 = 0 };
enum { gje3Pad_13 = 0 };
enum { gje3Pad_14 = 0 };
enum { gje3Pad_15 = 3 };
enum { gje3Pad_16 = 4 };
enum { gje3Pad_17 = 1 };
enum { gje3Pad_18 = 0 };
enum { gje3Pad_19 = 0 };
enum { gje3Pad_20 = 0 };
enum { gje3Pad_21 = 2 };
enum { gje3Pad_22 = 0 };
enum { gje3Pad_23 = 2 };
enum { gje3Pad_24 = 1 };
enum { gje3Pad_25 = 0 };
enum { gje3Pad_26 = 0 };
enum { gje3Pad_27 = 0 };
enum { gje3Pad_28 = 0 };
enum { gje3Pad_29 = 0 };
enum { gje3Pad_30 = 0 };
enum { gje3Pad_31 = 5 };
enum { gje3Pad_32 = 4 };
enum { gje3Pad_33 = 2 };
enum { gje3Pad_34 = 0 };
enum { gje3Pad_35 = 2 };
enum { gje3Pad_36 = 0 };
enum { gje3Pad_37 = 1 };
enum { gje3Pad_38 = 0 };
enum { gje3Pad_39 = 5 };
enum { gje3Pad_40 = 2 };
enum { gje3Pad_41 = 0 };
enum { gje3Pad_42 = 0 };
enum { gje3Pad_43 = 2 };
enum { gje3Pad_44 = 0 };
enum { gje3Pad_45 = 1 };
enum { gje3Pad_46 = 0 };
enum { gje3Pad_47 = 5 };
enum { gje3Pad_48 = 5 };
enum { gje3Pad_49 = 5 };
enum { gje3Pad_50 = 4 };
enum { gje3Pad_51 = 2 };
enum { gje3Pad_52 = 0 };
enum { gje3Pad_53 = 0 };
enum { gje3Pad_54 = 0 };
enum { gje3Pad_55 = 4 };
enum { gje3Pad_56 = 4 };
enum { gje3Pad_57 = 2 };
enum { gje3Pad_58 = 2 };
enum { gje3Pad_59 = 1 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 1 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 5 };
enum { gje3Pad_64 = 5 };
enum { gje3Pad_65 = 4 };
enum { gje3Pad_66 = 4 };
enum { gje3Pad_67 = 3 };
enum { gje3Pad_68 = 2 };
enum { gje3Pad_69 = 1 };
enum { gje3Pad_70 = 0 };
enum { gje3Pad_71 = 2 };
enum { gje3Pad_72 = 1 };
enum { gje3Pad_73 = 2 };
enum { gje3Pad_74 = 0 };
enum { gje3Pad_75 = 0 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 0 };
enum { gje3Pad_79 = 0 };
enum { gje3Pad_80 = 0 };
enum { gje3Pad_81 = 0 };
enum { gje3Pad_82 = 0 };
enum { gje3Pad_83 = 0 };
enum { gje3Pad_84 = 0 };
enum { gje3Pad_85 = 0 };
enum { gje3Pad_86 = 0 };
enum { gje3Pad_87 = 0 };
enum { gje3Pad_88 = 0 };
enum { gje3Pad_89 = 0 };
enum { gje3Pad_90 = 0 };
enum { gje3Pad_91 = 0 };
enum { gje3Pad_92 = 0 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 0 };
enum { gje3Pad_95 = 0 };
enum { gje3Pad_96 = 0 };
enum { gje3Pad_97 = 0 };
enum { gje3Pad_98 = 0 };
enum { gje3Pad_99 = 0 };
enum { gje3Pad_100 = 0 };
enum { gje3Pad_101 = 0 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 0 };
enum { gje3Pad_104 = 0 };
enum { gje3Pad_105 = 0 };
enum { gje3Pad_106 = 0 };
enum { gje3Pad_107 = 0 };
enum { gje3Pad_108 = 0 };
enum { gje3Pad_109 = 0 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 1 };
enum { gje3SrchThrd_03 = 2 };
enum { gje3SrchThrd_04 = 2 };
enum { gje3SrchThrd_05 = 2 };
enum { gje3SrchThrd_06 = 2 };
enum { gje3SrchThrd_07 = 2 };
enum { gje3SrchThrd_08 = 2 };
enum { gje3SrchThrd_09 = 2 };
enum { gje3SrchThrd_10 = 2 };
enum { gje3SrchThrd_11 = 2 };
enum { gje3SrchThrd_12 = 2 };
enum { gje3SrchThrd_13 = 2 };
enum { gje3SrchThrd_14 = 2 };
enum { gje3SrchThrd_15 = 2 };
enum { gje3SrchThrd_16 = 2 };
enum { gje3SrchThrd_17 = 2 };
enum { gje3SrchThrd_18 = 2 };
enum { gje3SrchThrd_19 = 2 };
enum { gje3SrchThrd_20 = 3 };
enum { gje3SrchThrd_21 = 3 };
enum { gje3SrchThrd_22 = 3 };
enum { gje3SrchThrd_23 = 3 };
enum { gje3SrchThrd_24 = 3 };
enum { gje3SrchThrd_25 = 3 };
enum { gje3SrchThrd_26 = 3 };
enum { gje3SrchThrd_27 = 3 };
enum { gje3SrchThrd_28 = 3 };
enum { gje3SrchThrd_29 = 3 };
enum { gje3SrchThrd_30 = 3 };
enum { gje3SrchThrd_31 = 3 };
enum { gje3SrchThrd_32 = 3 };
enum { gje3SrchThrd_33 = 3 };
enum { gje3SrchThrd_34 = 3 };
enum { gje3SrchThrd_35 = 3 };
enum { gje3SrchThrd_36 = 3 };
enum { gje3SrchThrd_37 = 3 };
enum { gje3SrchThrd_38 = 3 };
enum { gje3SrchThrd_39 = 3 };
enum { gje3SrchThrd_40 = 3 };
enum { gje3SrchThrd_41 = 5 };
enum { gje3SrchThrd_42 = 5 };
enum { gje3SrchThrd_43 = 5 };
enum { gje3SrchThrd_44 = 5 };
enum { gje3SrchThrd_45 = 5 };
enum { gje3SrchThrd_46 = 5 };
enum { gje3SrchThrd_47 = 5 };
enum { gje3SrchThrd_48 = 5 };
enum { gje3SrchThrd_49 = 5 };
enum { gje3SrchThrd_50 = 5 };
enum { gje3SrchThrd_51 = 5 };
enum { gje3SrchThrd_52 = 5 };
enum { gje3SrchThrd_53 = 5 };
enum { gje3SrchThrd_54 = 5 };
enum { gje3SrchThrd_55 = 5 };
enum { gje3SrchThrd_56 = 5 };
enum { gje3SrchThrd_57 = 5 };
enum { gje3SrchThrd_58 = 5 };
enum { gje3SrchThrd_59 = 5 };
enum { gje3SrchThrd_60 = 5 };
enum { gje3SrchThrd_61 = 5 };
enum { gje3SrchThrd_62 = 5 };
enum { gje3SrchThrd_63 = 5 };
enum { gje3SrchThrd_64 = 5 };
enum { gje3SrchThrd_65 = 5 };
enum { gje3SrchThrd_66 = 5 };
enum { gje3SrchThrd_67 = 5 };
enum { gje3SrchThrd_68 = 5 };
enum { gje3SrchThrd_69 = 5 };
enum { gje3SrchThrd_70 = 5 };
enum { gje3SrchThrd_71 = 5 };
enum { gje3SrchThrd_72 = 5 };
enum { gje3SrchThrd_73 = 5 };
enum { gje3SrchThrd_74 = 5 };
enum { gje3SrchThrd_75 = 5 };
enum { gje3SrchThrd_76 = 5 };
enum { gje3SrchThrd_77 = 5 };
enum { gje3SrchThrd_78 = -1 };
enum { gje3SrchThrd_79 = -1 };
enum { gje3SrchThrd_80 = -1 };
enum { gje3SrchThrd_81 = -1 };
enum { gje3SrchThrd_82 = -1 };
enum { gje3SrchThrd_83 = -1 };
enum { gje3SrchThrd_84 = -1 };
enum { gje3SrchThrd_85 = -1 };
enum { gje3SrchThrd_86 = -1 };
enum { gje3SrchThrd_87 = -1 };
enum { gje3SrchThrd_88 = -1 };
enum { gje3SrchThrd_89 = -1 };
enum { gje3SrchThrd_90 = -1 };
enum { gje3SrchThrd_91 = -1 };
enum { gje3SrchThrd_92 = -1 };
enum { gje3SrchThrd_93 = -1 };
enum { gje3SrchThrd_94 = -1 };
enum { gje3SrchThrd_95 = -1 };
enum { gje3SrchThrd_96 = -1 };
enum { gje3SrchThrd_97 = -1 };
enum { gje3SrchThrd_98 = -1 };
enum { gje3SrchThrd_99 = -1 };
enum { gje3SrchThrd_100 = -1 };
enum { gje3SrchThrd_101 = -1 };
enum { gje3SrchThrd_102 = -1 };
enum { gje3SrchThrd_103 = -1 };
enum { gje3SrchThrd_104 = -1 };
enum { gje3SrchThrd_105 = -1 };
enum { gje3SrchThrd_106 = -1 };
enum { gje3SrchThrd_107 = -1 };
enum { gje3SrchThrd_108 = -1 };
enum { gje3SrchThrd_109 = -1 };
enum { matInv2x2MinBatch = 1300 };
enum { matInv3x3MinBatch = 1100 };
enum { matInv4x4MinBatch = 1100 };
enum { matInv5x5MinBatch = 1100 };
enum { matInv6x6MinBatch = 1100 };
enum { matInv7x7MinBatch = 1100 };
enum { matInv8x8MinBatch = 1100 };
enum { matInv9x9MinBatch = 1100 };
enum { matInv10x10MinBatch= 1200 };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 10 };
};
template<> class config<hipComplex,ARCH_SM35> {
public:
typedef float absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim = 77 };
enum { gje3MinBlks = 1 };
enum { gje3MaxThrds = 2048 }; /* sm_35, 32 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 5 };
enum { gje3DimX_06 = 6 };
enum { gje3DimX_07 = 7 };
enum { gje3DimX_08 = 8 };
enum { gje3DimX_09 = 9 };
enum { gje3DimX_10 = 10 };
enum { gje3DimX_11 = 11 };
enum { gje3DimX_12 = 8 };
enum { gje3DimX_13 = 7 };
enum { gje3DimX_14 = 7 };
enum { gje3DimX_15 = 5 };
enum { gje3DimX_16 = 8 };
enum { gje3DimX_17 = 5 };
enum { gje3DimX_18 = 5 };
enum { gje3DimX_19 = 5 };
enum { gje3DimX_20 = 8 };
enum { gje3DimX_21 = 6 };
enum { gje3DimX_22 = 4 };
enum { gje3DimX_23 = 8 };
enum { gje3DimX_24 = 8 };
enum { gje3DimX_25 = 5 };
enum { gje3DimX_26 = 6 };
enum { gje3DimX_27 = 7 };
enum { gje3DimX_28 = 8 };
enum { gje3DimX_29 = 8 };
enum { gje3DimX_30 = 6 };
enum { gje3DimX_31 = 8 };
enum { gje3DimX_32 = 8 };
enum { gje3DimX_33 = 11 };
enum { gje3DimX_34 = 6 };
enum { gje3DimX_35 = 7 };
enum { gje3DimX_36 = 8 };
enum { gje3DimX_37 = 10 };
enum { gje3DimX_38 = 10 };
enum { gje3DimX_39 = 8 };
enum { gje3DimX_40 = 10 };
enum { gje3DimX_41 = 9 };
enum { gje3DimX_42 = 10 };
enum { gje3DimX_43 = 11 };
enum { gje3DimX_44 = 8 };
enum { gje3DimX_45 = 9 };
enum { gje3DimX_46 = 12 };
enum { gje3DimX_47 = 12 };
enum { gje3DimX_48 = 12 };
enum { gje3DimX_49 = 12 };
enum { gje3DimX_50 = 10 };
enum { gje3DimX_51 = 9 };
enum { gje3DimX_52 = 8 };
enum { gje3DimX_53 = 11 };
enum { gje3DimX_54 = 12 };
enum { gje3DimX_55 = 11 };
enum { gje3DimX_56 = 12 };
enum { gje3DimX_57 = 12 };
enum { gje3DimX_58 = 12 };
enum { gje3DimX_59 = 12 };
enum { gje3DimX_60 = 12 };
enum { gje3DimX_61 = 12 };
enum { gje3DimX_62 = 12 };
enum { gje3DimX_63 = 11 };
enum { gje3DimX_64 = 12 };
enum { gje3DimX_65 = 12 };
enum { gje3DimX_66 = 11 };
enum { gje3DimX_67 = 12 };
enum { gje3DimX_68 = 12 };
enum { gje3DimX_69 = 12 };
enum { gje3DimX_70 = 12 };
enum { gje3DimX_71 = 12 };
enum { gje3DimX_72 = 12 };
enum { gje3DimX_73 = 11 };
enum { gje3DimX_74 = 10 };
enum { gje3DimX_75 = 11 };
enum { gje3DimX_76 = 12 };
enum { gje3DimX_77 = 10 };
enum { gje3DimX_78 = -1 };
enum { gje3DimX_79 = -1 };
enum { gje3DimX_80 = -1 };
enum { gje3DimX_81 = -1 };
enum { gje3DimX_82 = -1 };
enum { gje3DimX_83 = -1 };
enum { gje3DimX_84 = -1 };
enum { gje3DimX_85 = -1 };
enum { gje3DimX_86 = -1 };
enum { gje3DimX_87 = -1 };
enum { gje3DimX_88 = -1 };
enum { gje3DimX_89 = -1 };
enum { gje3DimX_90 = -1 };
enum { gje3DimX_91 = -1 };
enum { gje3DimX_92 = -1 };
enum { gje3DimX_93 = -1 };
enum { gje3DimX_94 = -1 };
enum { gje3DimX_95 = -1 };
enum { gje3DimX_96 = -1 };
enum { gje3DimX_97 = -1 };
enum { gje3DimX_98 = -1 };
enum { gje3DimX_99 = -1 };
enum { gje3DimX_100 = -1 };
enum { gje3DimX_101 = -1 };
enum { gje3DimX_102 = -1 };
enum { gje3DimX_103 = -1 };
enum { gje3DimX_104 = -1 };
enum { gje3DimX_105 = -1 };
enum { gje3DimX_106 = -1 };
enum { gje3DimX_107 = -1 };
enum { gje3DimX_108 = -1 };
enum { gje3DimX_109 = -1 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 0 };
enum { gje3Pad_03 = 0 };
enum { gje3Pad_04 = 0 };
enum { gje3Pad_05 = 0 };
enum { gje3Pad_06 = 0 };
enum { gje3Pad_07 = 0 };
enum { gje3Pad_08 = 1 };
enum { gje3Pad_09 = 0 };
enum { gje3Pad_10 = 0 };
enum { gje3Pad_11 = 0 };
enum { gje3Pad_12 = 0 };
enum { gje3Pad_13 = 1 };
enum { gje3Pad_14 = 5 };
enum { gje3Pad_15 = 3 };
enum { gje3Pad_16 = 4 };
enum { gje3Pad_17 = 1 };
enum { gje3Pad_18 = 0 };
enum { gje3Pad_19 = 0 };
enum { gje3Pad_20 = 0 };
enum { gje3Pad_21 = 1 };
enum { gje3Pad_22 = 0 };
enum { gje3Pad_23 = 2 };
enum { gje3Pad_24 = 1 };
enum { gje3Pad_25 = 0 };
enum { gje3Pad_26 = 0 };
enum { gje3Pad_27 = 0 };
enum { gje3Pad_28 = 0 };
enum { gje3Pad_29 = 0 };
enum { gje3Pad_30 = 0 };
enum { gje3Pad_31 = 5 };
enum { gje3Pad_32 = 4 };
enum { gje3Pad_33 = 2 };
enum { gje3Pad_34 = 0 };
enum { gje3Pad_35 = 4 };
enum { gje3Pad_36 = 0 };
enum { gje3Pad_37 = 0 };
enum { gje3Pad_38 = 0 };
enum { gje3Pad_39 = 5 };
enum { gje3Pad_40 = 2 };
enum { gje3Pad_41 = 0 };
enum { gje3Pad_42 = 0 };
enum { gje3Pad_43 = 0 };
enum { gje3Pad_44 = 0 };
enum { gje3Pad_45 = 1 };
enum { gje3Pad_46 = 0 };
enum { gje3Pad_47 = 2 };
enum { gje3Pad_48 = 1 };
enum { gje3Pad_49 = 5 };
enum { gje3Pad_50 = 3 };
enum { gje3Pad_51 = 2 };
enum { gje3Pad_52 = 0 };
enum { gje3Pad_53 = 0 };
enum { gje3Pad_54 = 0 };
enum { gje3Pad_55 = 4 };
enum { gje3Pad_56 = 4 };
enum { gje3Pad_57 = 3 };
enum { gje3Pad_58 = 2 };
enum { gje3Pad_59 = 1 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 1 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 0 };
enum { gje3Pad_64 = 5 };
enum { gje3Pad_65 = 4 };
enum { gje3Pad_66 = 3 };
enum { gje3Pad_67 = 3 };
enum { gje3Pad_68 = 2 };
enum { gje3Pad_69 = 1 };
enum { gje3Pad_70 = 0 };
enum { gje3Pad_71 = 5 };
enum { gje3Pad_72 = 4 };
enum { gje3Pad_73 = 2 };
enum { gje3Pad_74 = 0 };
enum { gje3Pad_75 = 0 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 0 };
enum { gje3Pad_79 = 0 };
enum { gje3Pad_80 = 0 };
enum { gje3Pad_81 = 0 };
enum { gje3Pad_82 = 0 };
enum { gje3Pad_83 = 0 };
enum { gje3Pad_84 = 0 };
enum { gje3Pad_85 = 0 };
enum { gje3Pad_86 = 0 };
enum { gje3Pad_87 = 0 };
enum { gje3Pad_88 = 0 };
enum { gje3Pad_89 = 0 };
enum { gje3Pad_90 = 0 };
enum { gje3Pad_91 = 0 };
enum { gje3Pad_92 = 0 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 0 };
enum { gje3Pad_95 = 0 };
enum { gje3Pad_96 = 0 };
enum { gje3Pad_97 = 0 };
enum { gje3Pad_98 = 0 };
enum { gje3Pad_99 = 0 };
enum { gje3Pad_100 = 0 };
enum { gje3Pad_101 = 0 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 0 };
enum { gje3Pad_104 = 0 };
enum { gje3Pad_105 = 0 };
enum { gje3Pad_106 = 0 };
enum { gje3Pad_107 = 0 };
enum { gje3Pad_108 = 0 };
enum { gje3Pad_109 = 0 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 1 };
enum { gje3SrchThrd_03 = 2 };
enum { gje3SrchThrd_04 = 2 };
enum { gje3SrchThrd_05 = 2 };
enum { gje3SrchThrd_06 = 2 };
enum { gje3SrchThrd_07 = 2 };
enum { gje3SrchThrd_08 = 2 };
enum { gje3SrchThrd_09 = 2 };
enum { gje3SrchThrd_10 = 2 };
enum { gje3SrchThrd_11 = 2 };
enum { gje3SrchThrd_12 = 2 };
enum { gje3SrchThrd_13 = 2 };
enum { gje3SrchThrd_14 = 2 };
enum { gje3SrchThrd_15 = 2 };
enum { gje3SrchThrd_16 = 3 };
enum { gje3SrchThrd_17 = 3 };
enum { gje3SrchThrd_18 = 3 };
enum { gje3SrchThrd_19 = 3 };
enum { gje3SrchThrd_20 = 3 };
enum { gje3SrchThrd_21 = 3 };
enum { gje3SrchThrd_22 = 3 };
enum { gje3SrchThrd_23 = 3 };
enum { gje3SrchThrd_24 = 3 };
enum { gje3SrchThrd_25 = 3 };
enum { gje3SrchThrd_26 = 3 };
enum { gje3SrchThrd_27 = 3 };
enum { gje3SrchThrd_28 = 3 };
enum { gje3SrchThrd_29 = 3 };
enum { gje3SrchThrd_30 = 3 };
enum { gje3SrchThrd_31 = 3 };
enum { gje3SrchThrd_32 = 3 };
enum { gje3SrchThrd_33 = 3 };
enum { gje3SrchThrd_34 = 3 };
enum { gje3SrchThrd_35 = 3 };
enum { gje3SrchThrd_36 = 3 };
enum { gje3SrchThrd_37 = 3 };
enum { gje3SrchThrd_38 = 3 };
enum { gje3SrchThrd_39 = 3 };
enum { gje3SrchThrd_40 = 3 };
enum { gje3SrchThrd_41 = 3 };
enum { gje3SrchThrd_42 = 3 };
enum { gje3SrchThrd_43 = 3 };
enum { gje3SrchThrd_44 = 5 };
enum { gje3SrchThrd_45 = 5 };
enum { gje3SrchThrd_46 = 5 };
enum { gje3SrchThrd_47 = 5 };
enum { gje3SrchThrd_48 = 5 };
enum { gje3SrchThrd_49 = 5 };
enum { gje3SrchThrd_50 = 5 };
enum { gje3SrchThrd_51 = 5 };
enum { gje3SrchThrd_52 = 5 };
enum { gje3SrchThrd_53 = 5 };
enum { gje3SrchThrd_54 = 5 };
enum { gje3SrchThrd_55 = 5 };
enum { gje3SrchThrd_56 = 5 };
enum { gje3SrchThrd_57 = 5 };
enum { gje3SrchThrd_58 = 5 };
enum { gje3SrchThrd_59 = 5 };
enum { gje3SrchThrd_60 = 5 };
enum { gje3SrchThrd_61 = 5 };
enum { gje3SrchThrd_62 = 5 };
enum { gje3SrchThrd_63 = 5 };
enum { gje3SrchThrd_64 = 5 };
enum { gje3SrchThrd_65 = 5 };
enum { gje3SrchThrd_66 = 5 };
enum { gje3SrchThrd_67 = 5 };
enum { gje3SrchThrd_68 = 5 };
enum { gje3SrchThrd_69 = 5 };
enum { gje3SrchThrd_70 = 5 };
enum { gje3SrchThrd_71 = 5 };
enum { gje3SrchThrd_72 = 5 };
enum { gje3SrchThrd_73 = 5 };
enum { gje3SrchThrd_74 = 5 };
enum { gje3SrchThrd_75 = 5 };
enum { gje3SrchThrd_76 = 5 };
enum { gje3SrchThrd_77 = 6 };
enum { gje3SrchThrd_78 = -1 };
enum { gje3SrchThrd_79 = -1 };
enum { gje3SrchThrd_80 = -1 };
enum { gje3SrchThrd_81 = -1 };
enum { gje3SrchThrd_82 = -1 };
enum { gje3SrchThrd_83 = -1 };
enum { gje3SrchThrd_84 = -1 };
enum { gje3SrchThrd_85 = -1 };
enum { gje3SrchThrd_86 = -1 };
enum { gje3SrchThrd_87 = -1 };
enum { gje3SrchThrd_88 = -1 };
enum { gje3SrchThrd_89 = -1 };
enum { gje3SrchThrd_90 = -1 };
enum { gje3SrchThrd_91 = -1 };
enum { gje3SrchThrd_92 = -1 };
enum { gje3SrchThrd_93 = -1 };
enum { gje3SrchThrd_94 = -1 };
enum { gje3SrchThrd_95 = -1 };
enum { gje3SrchThrd_96 = -1 };
enum { gje3SrchThrd_97 = -1 };
enum { gje3SrchThrd_98 = -1 };
enum { gje3SrchThrd_99 = -1 };
enum { gje3SrchThrd_100 = -1 };
enum { gje3SrchThrd_101 = -1 };
enum { gje3SrchThrd_102 = -1 };
enum { gje3SrchThrd_103 = -1 };
enum { gje3SrchThrd_104 = -1 };
enum { gje3SrchThrd_105 = -1 };
enum { gje3SrchThrd_106 = -1 };
enum { gje3SrchThrd_107 = -1 };
enum { gje3SrchThrd_108 = -1 };
enum { gje3SrchThrd_109 = -1 };
enum { matInv2x2MinBatch = 1300 };
enum { matInv3x3MinBatch = 1200 };
enum { matInv4x4MinBatch = 1100 };
enum { matInv5x5MinBatch = 1100 };
enum { matInv6x6MinBatch = 1100 };
enum { matInv7x7MinBatch = 1300 };
enum { matInv8x8MinBatch = 1400 };
enum { matInv9x9MinBatch = 1500 };
enum { matInv10x10MinBatch= 1500 };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 10 };
};
template<> class config<hipDoubleComplex,ARCH_SM35> {
public:
typedef double absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim = 55 };
enum { gje3MinBlks = 1 };
enum { gje3MaxThrds = 2048 }; /* sm_35, 32 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 5 };
enum { gje3DimX_06 = 6 };
enum { gje3DimX_07 = 7 };
enum { gje3DimX_08 = 8 };
enum { gje3DimX_09 = 9 };
enum { gje3DimX_10 = 6 };
enum { gje3DimX_11 = 11 };
enum { gje3DimX_12 = 6 };
enum { gje3DimX_13 = 7 };
enum { gje3DimX_14 = 4 };
enum { gje3DimX_15 = 8 };
enum { gje3DimX_16 = 8 };
enum { gje3DimX_17 = 9 };
enum { gje3DimX_18 = 6 };
enum { gje3DimX_19 = 8 };
enum { gje3DimX_20 = 8 };
enum { gje3DimX_21 = 7 };
enum { gje3DimX_22 = 8 };
enum { gje3DimX_23 = 8 };
enum { gje3DimX_24 = 8 };
enum { gje3DimX_25 = 9 };
enum { gje3DimX_26 = 10 };
enum { gje3DimX_27 = 8 };
enum { gje3DimX_28 = 8 };
enum { gje3DimX_29 = 8 };
enum { gje3DimX_30 = 8 };
enum { gje3DimX_31 = 8 };
enum { gje3DimX_32 = 8 };
enum { gje3DimX_33 = 11 };
enum { gje3DimX_34 = 12 };
enum { gje3DimX_35 = 12 };
enum { gje3DimX_36 = 12 };
enum { gje3DimX_37 = 10 };
enum { gje3DimX_38 = 10 };
enum { gje3DimX_39 = 11 };
enum { gje3DimX_40 = 10 };
enum { gje3DimX_41 = 12 };
enum { gje3DimX_42 = 12 };
enum { gje3DimX_43 = 11 };
enum { gje3DimX_44 = 12 };
enum { gje3DimX_45 = 12 };
enum { gje3DimX_46 = 12 };
enum { gje3DimX_47 = 12 };
enum { gje3DimX_48 = 12 };
enum { gje3DimX_49 = 10 };
enum { gje3DimX_50 = 10 };
enum { gje3DimX_51 = 11 };
enum { gje3DimX_52 = 12 };
enum { gje3DimX_53 = 12 };
enum { gje3DimX_54 = 11 };
enum { gje3DimX_55 = 11 };
enum { gje3DimX_56 = -1 };
enum { gje3DimX_57 = -1 };
enum { gje3DimX_58 = -1 };
enum { gje3DimX_59 = -1 };
enum { gje3DimX_60 = -1 };
enum { gje3DimX_61 = -1 };
enum { gje3DimX_62 = -1 };
enum { gje3DimX_63 = -1 };
enum { gje3DimX_64 = -1 };
enum { gje3DimX_65 = -1 };
enum { gje3DimX_66 = -1 };
enum { gje3DimX_67 = -1 };
enum { gje3DimX_68 = -1 };
enum { gje3DimX_69 = -1 };
enum { gje3DimX_70 = -1 };
enum { gje3DimX_71 = -1 };
enum { gje3DimX_72 = -1 };
enum { gje3DimX_73 = -1 };
enum { gje3DimX_74 = -1 };
enum { gje3DimX_75 = -1 };
enum { gje3DimX_76 = -1 };
enum { gje3DimX_77 = -1 };
enum { gje3DimX_78 = -1 };
enum { gje3DimX_79 = -1 };
enum { gje3DimX_80 = -1 };
enum { gje3DimX_81 = -1 };
enum { gje3DimX_82 = -1 };
enum { gje3DimX_83 = -1 };
enum { gje3DimX_84 = -1 };
enum { gje3DimX_85 = -1 };
enum { gje3DimX_86 = -1 };
enum { gje3DimX_87 = -1 };
enum { gje3DimX_88 = -1 };
enum { gje3DimX_89 = -1 };
enum { gje3DimX_90 = -1 };
enum { gje3DimX_91 = -1 };
enum { gje3DimX_92 = -1 };
enum { gje3DimX_93 = -1 };
enum { gje3DimX_94 = -1 };
enum { gje3DimX_95 = -1 };
enum { gje3DimX_96 = -1 };
enum { gje3DimX_97 = -1 };
enum { gje3DimX_98 = -1 };
enum { gje3DimX_99 = -1 };
enum { gje3DimX_100 = -1 };
enum { gje3DimX_101 = -1 };
enum { gje3DimX_102 = -1 };
enum { gje3DimX_103 = -1 };
enum { gje3DimX_104 = -1 };
enum { gje3DimX_105 = -1 };
enum { gje3DimX_106 = -1 };
enum { gje3DimX_107 = -1 };
enum { gje3DimX_108 = -1 };
enum { gje3DimX_109 = -1 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 0 };
enum { gje3Pad_03 = 0 };
enum { gje3Pad_04 = 0 };
enum { gje3Pad_05 = 0 };
enum { gje3Pad_06 = 0 };
enum { gje3Pad_07 = 0 };
enum { gje3Pad_08 = 1 };
enum { gje3Pad_09 = 0 };
enum { gje3Pad_10 = 0 };
enum { gje3Pad_11 = 0 };
enum { gje3Pad_12 = 2 };
enum { gje3Pad_13 = 1 };
enum { gje3Pad_14 = 0 };
enum { gje3Pad_15 = 0 };
enum { gje3Pad_16 = 1 };
enum { gje3Pad_17 = 0 };
enum { gje3Pad_18 = 0 };
enum { gje3Pad_19 = 0 };
enum { gje3Pad_20 = 0 };
enum { gje3Pad_21 = 2 };
enum { gje3Pad_22 = 0 };
enum { gje3Pad_23 = 0 };
enum { gje3Pad_24 = 3 };
enum { gje3Pad_25 = 0 };
enum { gje3Pad_26 = 0 };
enum { gje3Pad_27 = 0 };
enum { gje3Pad_28 = 0 };
enum { gje3Pad_29 = 0 };
enum { gje3Pad_30 = 0 };
enum { gje3Pad_31 = 0 };
enum { gje3Pad_32 = 3 };
enum { gje3Pad_33 = 2 };
enum { gje3Pad_34 = 2 };
enum { gje3Pad_35 = 1 };
enum { gje3Pad_36 = 0 };
enum { gje3Pad_37 = 0 };
enum { gje3Pad_38 = 0 };
enum { gje3Pad_39 = 4 };
enum { gje3Pad_40 = 2 };
enum { gje3Pad_41 = 3 };
enum { gje3Pad_42 = 2 };
enum { gje3Pad_43 = 0 };
enum { gje3Pad_44 = 0 };
enum { gje3Pad_45 = 0 };
enum { gje3Pad_46 = 0 };
enum { gje3Pad_47 = 5 };
enum { gje3Pad_48 = 4 };
enum { gje3Pad_49 = 1 };
enum { gje3Pad_50 = 0 };
enum { gje3Pad_51 = 0 };
enum { gje3Pad_52 = 0 };
enum { gje3Pad_53 = 1 };
enum { gje3Pad_54 = 1 };
enum { gje3Pad_55 = 0 };
enum { gje3Pad_56 = 0 };
enum { gje3Pad_57 = 0 };
enum { gje3Pad_58 = 0 };
enum { gje3Pad_59 = 0 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 0 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 0 };
enum { gje3Pad_64 = 0 };
enum { gje3Pad_65 = 0 };
enum { gje3Pad_66 = 0 };
enum { gje3Pad_67 = 0 };
enum { gje3Pad_68 = 0 };
enum { gje3Pad_69 = 0 };
enum { gje3Pad_70 = 0 };
enum { gje3Pad_71 = 0 };
enum { gje3Pad_72 = 0 };
enum { gje3Pad_73 = 0 };
enum { gje3Pad_74 = 0 };
enum { gje3Pad_75 = 0 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 0 };
enum { gje3Pad_79 = 0 };
enum { gje3Pad_80 = 0 };
enum { gje3Pad_81 = 0 };
enum { gje3Pad_82 = 0 };
enum { gje3Pad_83 = 0 };
enum { gje3Pad_84 = 0 };
enum { gje3Pad_85 = 0 };
enum { gje3Pad_86 = 0 };
enum { gje3Pad_87 = 0 };
enum { gje3Pad_88 = 0 };
enum { gje3Pad_89 = 0 };
enum { gje3Pad_90 = 0 };
enum { gje3Pad_91 = 0 };
enum { gje3Pad_92 = 0 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 0 };
enum { gje3Pad_95 = 0 };
enum { gje3Pad_96 = 0 };
enum { gje3Pad_97 = 0 };
enum { gje3Pad_98 = 0 };
enum { gje3Pad_99 = 0 };
enum { gje3Pad_100 = 0 };
enum { gje3Pad_101 = 0 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 0 };
enum { gje3Pad_104 = 0 };
enum { gje3Pad_105 = 0 };
enum { gje3Pad_106 = 0 };
enum { gje3Pad_107 = 0 };
enum { gje3Pad_108 = 0 };
enum { gje3Pad_109 = 0 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 1 };
enum { gje3SrchThrd_03 = 2 };
enum { gje3SrchThrd_04 = 2 };
enum { gje3SrchThrd_05 = 2 };
enum { gje3SrchThrd_06 = 2 };
enum { gje3SrchThrd_07 = 2 };
enum { gje3SrchThrd_08 = 2 };
enum { gje3SrchThrd_09 = 2 };
enum { gje3SrchThrd_10 = 2 };
enum { gje3SrchThrd_11 = 2 };
enum { gje3SrchThrd_12 = 2 };
enum { gje3SrchThrd_13 = 2 };
enum { gje3SrchThrd_14 = 2 };
enum { gje3SrchThrd_15 = 2 };
enum { gje3SrchThrd_16 = 3 };
enum { gje3SrchThrd_17 = 3 };
enum { gje3SrchThrd_18 = 3 };
enum { gje3SrchThrd_19 = 3 };
enum { gje3SrchThrd_20 = 3 };
enum { gje3SrchThrd_21 = 3 };
enum { gje3SrchThrd_22 = 3 };
enum { gje3SrchThrd_23 = 3 };
enum { gje3SrchThrd_24 = 3 };
enum { gje3SrchThrd_25 = 3 };
enum { gje3SrchThrd_26 = 3 };
enum { gje3SrchThrd_27 = 3 };
enum { gje3SrchThrd_28 = 3 };
enum { gje3SrchThrd_29 = 3 };
enum { gje3SrchThrd_30 = 3 };
enum { gje3SrchThrd_31 = 3 };
enum { gje3SrchThrd_32 = 3 };
enum { gje3SrchThrd_33 = 3 };
enum { gje3SrchThrd_34 = 3 };
enum { gje3SrchThrd_35 = 3 };
enum { gje3SrchThrd_36 = 3 };
enum { gje3SrchThrd_37 = 3 };
enum { gje3SrchThrd_38 = 3 };
enum { gje3SrchThrd_39 = 3 };
enum { gje3SrchThrd_40 = 3 };
enum { gje3SrchThrd_41 = 3 };
enum { gje3SrchThrd_42 = 3 };
enum { gje3SrchThrd_43 = 3 };
enum { gje3SrchThrd_44 = 3 };
enum { gje3SrchThrd_45 = 3 };
enum { gje3SrchThrd_46 = 3 };
enum { gje3SrchThrd_47 = 4 };
enum { gje3SrchThrd_48 = 4 };
enum { gje3SrchThrd_49 = 4 };
enum { gje3SrchThrd_50 = 4 };
enum { gje3SrchThrd_51 = 4 };
enum { gje3SrchThrd_52 = 4 };
enum { gje3SrchThrd_53 = 4 };
enum { gje3SrchThrd_54 = 4 };
enum { gje3SrchThrd_55 = 4 };
enum { gje3SrchThrd_56 = -1 };
enum { gje3SrchThrd_57 = -1 };
enum { gje3SrchThrd_58 = -1 };
enum { gje3SrchThrd_59 = -1 };
enum { gje3SrchThrd_60 = -1 };
enum { gje3SrchThrd_61 = -1 };
enum { gje3SrchThrd_62 = -1 };
enum { gje3SrchThrd_63 = -1 };
enum { gje3SrchThrd_64 = -1 };
enum { gje3SrchThrd_65 = -1 };
enum { gje3SrchThrd_66 = -1 };
enum { gje3SrchThrd_67 = -1 };
enum { gje3SrchThrd_68 = -1 };
enum { gje3SrchThrd_69 = -1 };
enum { gje3SrchThrd_70 = -1 };
enum { gje3SrchThrd_71 = -1 };
enum { gje3SrchThrd_72 = -1 };
enum { gje3SrchThrd_73 = -1 };
enum { gje3SrchThrd_74 = -1 };
enum { gje3SrchThrd_75 = -1 };
enum { gje3SrchThrd_76 = -1 };
enum { gje3SrchThrd_77 = -1 };
enum { gje3SrchThrd_78 = -1 };
enum { gje3SrchThrd_79 = -1 };
enum { gje3SrchThrd_80 = -1 };
enum { gje3SrchThrd_81 = -1 };
enum { gje3SrchThrd_82 = -1 };
enum { gje3SrchThrd_83 = -1 };
enum { gje3SrchThrd_84 = -1 };
enum { gje3SrchThrd_85 = -1 };
enum { gje3SrchThrd_86 = -1 };
enum { gje3SrchThrd_87 = -1 };
enum { gje3SrchThrd_88 = -1 };
enum { gje3SrchThrd_89 = -1 };
enum { gje3SrchThrd_90 = -1 };
enum { gje3SrchThrd_91 = -1 };
enum { gje3SrchThrd_92 = -1 };
enum { gje3SrchThrd_93 = -1 };
enum { gje3SrchThrd_94 = -1 };
enum { gje3SrchThrd_95 = -1 };
enum { gje3SrchThrd_96 = -1 };
enum { gje3SrchThrd_97 = -1 };
enum { gje3SrchThrd_98 = -1 };
enum { gje3SrchThrd_99 = -1 };
enum { gje3SrchThrd_100 = -1 };
enum { gje3SrchThrd_101 = -1 };
enum { gje3SrchThrd_102 = -1 };
enum { gje3SrchThrd_103 = -1 };
enum { gje3SrchThrd_104 = -1 };
enum { gje3SrchThrd_105 = -1 };
enum { gje3SrchThrd_106 = -1 };
enum { gje3SrchThrd_107 = -1 };
enum { gje3SrchThrd_108 = -1 };
enum { gje3SrchThrd_109 = -1 };
enum { matInv2x2MinBatch = 1200 };
enum { matInv3x3MinBatch = 1100 };
enum { matInv4x4MinBatch = 1100 };
enum { matInv5x5MinBatch = 1200 };
enum { matInv6x6MinBatch = 1200 };
enum { matInv7x7MinBatch = 1500 };
enum { matInv8x8MinBatch = 7700 };
enum { matInv9x9MinBatch = 0x7fffffff };
enum { matInv10x10MinBatch= 0x7fffffff };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 8 };
};
template<> class config<float,ARCH_SM20> {
public:
typedef float absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim =109 };
enum { gje3MinBlks = 1 };
enum { gje3MaxThrds = 1536 }; /* sm_2x, 21 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 5 };
enum { gje3DimX_06 = 5 };
enum { gje3DimX_07 = 4 };
enum { gje3DimX_08 = 4 };
enum { gje3DimX_09 = 3 };
enum { gje3DimX_10 = 5 };
enum { gje3DimX_11 = 5 };
enum { gje3DimX_12 = 4 };
enum { gje3DimX_13 = 4 };
enum { gje3DimX_14 = 4 };
enum { gje3DimX_15 = 4 };
enum { gje3DimX_16 = 4 };
enum { gje3DimX_17 = 3 };
enum { gje3DimX_18 = 3 };
enum { gje3DimX_19 = 5 };
enum { gje3DimX_20 = 4 };
enum { gje3DimX_21 = 3 };
enum { gje3DimX_22 = 4 };
enum { gje3DimX_23 = 4 };
enum { gje3DimX_24 = 4 };
enum { gje3DimX_25 = 3 };
enum { gje3DimX_26 = 2 };
enum { gje3DimX_27 = 3 };
enum { gje3DimX_28 = 3 };
enum { gje3DimX_29 = 3 };
enum { gje3DimX_30 = 2 };
enum { gje3DimX_31 = 3 };
enum { gje3DimX_32 = 3 };
enum { gje3DimX_33 = 2 };
enum { gje3DimX_34 = 2 };
enum { gje3DimX_35 = 4 };
enum { gje3DimX_36 = 4 };
enum { gje3DimX_37 = 2 };
enum { gje3DimX_38 = 2 };
enum { gje3DimX_39 = 4 };
enum { gje3DimX_40 = 3 };
enum { gje3DimX_41 = 3 };
enum { gje3DimX_42 = 3 };
enum { gje3DimX_43 = 2 };
enum { gje3DimX_44 = 2 };
enum { gje3DimX_45 = 4 };
enum { gje3DimX_46 = 2 };
enum { gje3DimX_47 = 4 };
enum { gje3DimX_48 = 4 };
enum { gje3DimX_49 = 3 };
enum { gje3DimX_50 = 3 };
enum { gje3DimX_51 = 3 };
enum { gje3DimX_52 = 4 };
enum { gje3DimX_53 = 3 };
enum { gje3DimX_54 = 4 };
enum { gje3DimX_55 = 4 };
enum { gje3DimX_56 = 4 };
enum { gje3DimX_57 = 5 };
enum { gje3DimX_58 = 6 };
enum { gje3DimX_59 = 4 };
enum { gje3DimX_60 = 4 };
enum { gje3DimX_61 = 4 };
enum { gje3DimX_62 = 4 };
enum { gje3DimX_63 = 7 };
enum { gje3DimX_64 = 8 };
enum { gje3DimX_65 = 8 };
enum { gje3DimX_66 = 6 };
enum { gje3DimX_67 = 5 };
enum { gje3DimX_68 = 4 };
enum { gje3DimX_69 = 5 };
enum { gje3DimX_70 = 5 };
enum { gje3DimX_71 = 4 };
enum { gje3DimX_72 = 6 };
enum { gje3DimX_73 = 5 };
enum { gje3DimX_74 = 5 };
enum { gje3DimX_75 = 6 };
enum { gje3DimX_76 = 4 };
enum { gje3DimX_77 = 7 };
enum { gje3DimX_78 = 8 };
enum { gje3DimX_79 = 8 };
enum { gje3DimX_80 = 8 };
enum { gje3DimX_81 = 9 };
enum { gje3DimX_82 = 7 };
enum { gje3DimX_83 = 6 };
enum { gje3DimX_84 = 6 };
enum { gje3DimX_85 = 6 };
enum { gje3DimX_86 = 8 };
enum { gje3DimX_87 = 8 };
enum { gje3DimX_88 = 8 };
enum { gje3DimX_89 = 7 };
enum { gje3DimX_90 = 7 };
enum { gje3DimX_91 = 7 };
enum { gje3DimX_92 = 6 };
enum { gje3DimX_93 = 6 };
enum { gje3DimX_94 = 6 };
enum { gje3DimX_95 = 8 };
enum { gje3DimX_96 = 8 };
enum { gje3DimX_97 = 10 };
enum { gje3DimX_98 = 6 };
enum { gje3DimX_99 = 5 };
enum { gje3DimX_100 = 4 };
enum { gje3DimX_101 = 5 };
enum { gje3DimX_102 = 6 };
enum { gje3DimX_103 = 7 };
enum { gje3DimX_104 = 8 };
enum { gje3DimX_105 = 7 };
enum { gje3DimX_106 = 6 };
enum { gje3DimX_107 = 7 };
enum { gje3DimX_108 = 4 };
enum { gje3DimX_109 = 7 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 1 };
enum { gje3Pad_03 = 1 };
enum { gje3Pad_04 = 1 };
enum { gje3Pad_05 = 1 };
enum { gje3Pad_06 = 1 };
enum { gje3Pad_07 = 2 };
enum { gje3Pad_08 = 4 };
enum { gje3Pad_09 = 1 };
enum { gje3Pad_10 = 2 };
enum { gje3Pad_11 = 1 };
enum { gje3Pad_12 = 0 };
enum { gje3Pad_13 = 1 };
enum { gje3Pad_14 = 4 };
enum { gje3Pad_15 = 5 };
enum { gje3Pad_16 = 4 };
enum { gje3Pad_17 = 1 };
enum { gje3Pad_18 = 1 };
enum { gje3Pad_19 = 1 };
enum { gje3Pad_20 = 3 };
enum { gje3Pad_21 = 1 };
enum { gje3Pad_22 = 5 };
enum { gje3Pad_23 = 5 };
enum { gje3Pad_24 = 4 };
enum { gje3Pad_25 = 2 };
enum { gje3Pad_26 = 4 };
enum { gje3Pad_27 = 2 };
enum { gje3Pad_28 = 1 };
enum { gje3Pad_29 = 1 };
enum { gje3Pad_30 = 4 };
enum { gje3Pad_31 = 4 };
enum { gje3Pad_32 = 3 };
enum { gje3Pad_33 = 1 };
enum { gje3Pad_34 = 4 };
enum { gje3Pad_35 = 1 };
enum { gje3Pad_36 = 1 };
enum { gje3Pad_37 = 1 };
enum { gje3Pad_38 = 4 };
enum { gje3Pad_39 = 5 };
enum { gje3Pad_40 = 1 };
enum { gje3Pad_41 = 1 };
enum { gje3Pad_42 = 4 };
enum { gje3Pad_43 = 0 };
enum { gje3Pad_44 = 0 };
enum { gje3Pad_45 = 1 };
enum { gje3Pad_46 = 4 };
enum { gje3Pad_47 = 5 };
enum { gje3Pad_48 = 4 };
enum { gje3Pad_49 = 1 };
enum { gje3Pad_50 = 4 };
enum { gje3Pad_51 = 3 };
enum { gje3Pad_52 = 3 };
enum { gje3Pad_53 = 1 };
enum { gje3Pad_54 = 0 };
enum { gje3Pad_55 = 5 };
enum { gje3Pad_56 = 4 };
enum { gje3Pad_57 = 2 };
enum { gje3Pad_58 = 1 };
enum { gje3Pad_59 = 1 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 1 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 5 };
enum { gje3Pad_64 = 4 };
enum { gje3Pad_65 = 3 };
enum { gje3Pad_66 = 4 };
enum { gje3Pad_67 = 2 };
enum { gje3Pad_68 = 0 };
enum { gje3Pad_69 = 0 };
enum { gje3Pad_70 = 3 };
enum { gje3Pad_71 = 5 };
enum { gje3Pad_72 = 3 };
enum { gje3Pad_73 = 3 };
enum { gje3Pad_74 = 2 };
enum { gje3Pad_75 = 2 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 5 };
enum { gje3Pad_79 = 5 };
enum { gje3Pad_80 = 4 };
enum { gje3Pad_81 = 4 };
enum { gje3Pad_82 = 1 };
enum { gje3Pad_83 = 0 };
enum { gje3Pad_84 = 3 };
enum { gje3Pad_85 = 2 };
enum { gje3Pad_86 = 2 };
enum { gje3Pad_87 = 1 };
enum { gje3Pad_88 = 1 };
enum { gje3Pad_89 = 1 };
enum { gje3Pad_90 = 1 };
enum { gje3Pad_91 = 1 };
enum { gje3Pad_92 = 1 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 5 };
enum { gje3Pad_95 = 5 };
enum { gje3Pad_96 = 4 };
enum { gje3Pad_97 = 5 };
enum { gje3Pad_98 = 4 };
enum { gje3Pad_99 = 2 };
enum { gje3Pad_100 = 0 };
enum { gje3Pad_101 = 0 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 2 };
enum { gje3Pad_104 = 1 };
enum { gje3Pad_105 = 4 };
enum { gje3Pad_106 = 2 };
enum { gje3Pad_107 = 2 };
enum { gje3Pad_108 = 0 };
enum { gje3Pad_109 = 1 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 1 };
enum { gje3SrchThrd_03 = 2 };
enum { gje3SrchThrd_04 = 2 };
enum { gje3SrchThrd_05 = 2 };
enum { gje3SrchThrd_06 = 2 };
enum { gje3SrchThrd_07 = 2 };
enum { gje3SrchThrd_08 = 3 };
enum { gje3SrchThrd_09 = 3 };
enum { gje3SrchThrd_10 = 3 };
enum { gje3SrchThrd_11 = 3 };
enum { gje3SrchThrd_12 = 3 };
enum { gje3SrchThrd_13 = 3 };
enum { gje3SrchThrd_14 = 3 };
enum { gje3SrchThrd_15 = 3 };
enum { gje3SrchThrd_16 = 3 };
enum { gje3SrchThrd_17 = 3 };
enum { gje3SrchThrd_18 = 3 };
enum { gje3SrchThrd_19 = 3 };
enum { gje3SrchThrd_20 = 3 };
enum { gje3SrchThrd_21 = 3 };
enum { gje3SrchThrd_22 = 3 };
enum { gje3SrchThrd_23 = 3 };
enum { gje3SrchThrd_24 = 3 };
enum { gje3SrchThrd_25 = 3 };
enum { gje3SrchThrd_26 = 3 };
enum { gje3SrchThrd_27 = 3 };
enum { gje3SrchThrd_28 = 3 };
enum { gje3SrchThrd_29 = 3 };
enum { gje3SrchThrd_30 = 3 };
enum { gje3SrchThrd_31 = 3 };
enum { gje3SrchThrd_32 = 3 };
enum { gje3SrchThrd_33 = 3 };
enum { gje3SrchThrd_34 = 3 };
enum { gje3SrchThrd_35 = 3 };
enum { gje3SrchThrd_36 = 3 };
enum { gje3SrchThrd_37 = 3 };
enum { gje3SrchThrd_38 = 3 };
enum { gje3SrchThrd_39 = 3 };
enum { gje3SrchThrd_40 = 4 };
enum { gje3SrchThrd_41 = 4 };
enum { gje3SrchThrd_42 = 4 };
enum { gje3SrchThrd_43 = 4 };
enum { gje3SrchThrd_44 = 4 };
enum { gje3SrchThrd_45 = 4 };
enum { gje3SrchThrd_46 = 4 };
enum { gje3SrchThrd_47 = 4 };
enum { gje3SrchThrd_48 = 4 };
enum { gje3SrchThrd_49 = 4 };
enum { gje3SrchThrd_50 = 4 };
enum { gje3SrchThrd_51 = 4 };
enum { gje3SrchThrd_52 = 4 };
enum { gje3SrchThrd_53 = 4 };
enum { gje3SrchThrd_54 = 4 };
enum { gje3SrchThrd_55 = 4 };
enum { gje3SrchThrd_56 = 4 };
enum { gje3SrchThrd_57 = 4 };
enum { gje3SrchThrd_58 = 4 };
enum { gje3SrchThrd_59 = 4 };
enum { gje3SrchThrd_60 = 4 };
enum { gje3SrchThrd_61 = 4 };
enum { gje3SrchThrd_62 = 4 };
enum { gje3SrchThrd_63 = 4 };
enum { gje3SrchThrd_64 = 4 };
enum { gje3SrchThrd_65 = 4 };
enum { gje3SrchThrd_66 = 5 };
enum { gje3SrchThrd_67 = 5 };
enum { gje3SrchThrd_68 = 5 };
enum { gje3SrchThrd_69 = 5 };
enum { gje3SrchThrd_70 = 5 };
enum { gje3SrchThrd_71 = 5 };
enum { gje3SrchThrd_72 = 5 };
enum { gje3SrchThrd_73 = 5 };
enum { gje3SrchThrd_74 = 5 };
enum { gje3SrchThrd_75 = 5 };
enum { gje3SrchThrd_76 = 5 };
enum { gje3SrchThrd_77 = 5 };
enum { gje3SrchThrd_78 = 5 };
enum { gje3SrchThrd_79 = 5 };
enum { gje3SrchThrd_80 = 5 };
enum { gje3SrchThrd_81 = 5 };
enum { gje3SrchThrd_82 = 5 };
enum { gje3SrchThrd_83 = 5 };
enum { gje3SrchThrd_84 = 6 };
enum { gje3SrchThrd_85 = 6 };
enum { gje3SrchThrd_86 = 6 };
enum { gje3SrchThrd_87 = 6 };
enum { gje3SrchThrd_88 = 6 };
enum { gje3SrchThrd_89 = 6 };
enum { gje3SrchThrd_90 = 6 };
enum { gje3SrchThrd_91 = 6 };
enum { gje3SrchThrd_92 = 6 };
enum { gje3SrchThrd_93 = 6 };
enum { gje3SrchThrd_94 = 6 };
enum { gje3SrchThrd_95 = 6 };
enum { gje3SrchThrd_96 = 6 };
enum { gje3SrchThrd_97 = 6 };
enum { gje3SrchThrd_98 = 6 };
enum { gje3SrchThrd_99 = 6 };
enum { gje3SrchThrd_100 = 6 };
enum { gje3SrchThrd_101 = 6 };
enum { gje3SrchThrd_102 = 6 };
enum { gje3SrchThrd_103 = 6 };
enum { gje3SrchThrd_104 = 6 };
enum { gje3SrchThrd_105 = 6 };
enum { gje3SrchThrd_106 = 6 };
enum { gje3SrchThrd_107 = 6 };
enum { gje3SrchThrd_108 = 6 };
enum { gje3SrchThrd_109 = 6 };
enum { matInv2x2MinBatch = 1700 };
enum { matInv3x3MinBatch = 1400 };
enum { matInv4x4MinBatch = 1400 };
enum { matInv5x5MinBatch = 1300 };
enum { matInv6x6MinBatch = 1400 };
enum { matInv7x7MinBatch = 1200 };
enum { matInv8x8MinBatch = 1200 };
enum { matInv9x9MinBatch = 1200 };
enum { matInv10x10MinBatch= 1300 };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 10 };
};
template<> class config<double,ARCH_SM20> {
public:
typedef double absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim = 77 };
enum { gje3MinBlks = 1 };
enum { gje3MaxThrds = 1408 }; /* sm_2x, 23 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 5 };
enum { gje3DimX_06 = 5 };
enum { gje3DimX_07 = 7 };
enum { gje3DimX_08 = 4 };
enum { gje3DimX_09 = 3 };
enum { gje3DimX_10 = 6 };
enum { gje3DimX_11 = 5 };
enum { gje3DimX_12 = 4 };
enum { gje3DimX_13 = 4 };
enum { gje3DimX_14 = 4 };
enum { gje3DimX_15 = 4 };
enum { gje3DimX_16 = 4 };
enum { gje3DimX_17 = 3 };
enum { gje3DimX_18 = 3 };
enum { gje3DimX_19 = 3 };
enum { gje3DimX_20 = 4 };
enum { gje3DimX_21 = 3 };
enum { gje3DimX_22 = 4 };
enum { gje3DimX_23 = 4 };
enum { gje3DimX_24 = 4 };
enum { gje3DimX_25 = 5 };
enum { gje3DimX_26 = 2 };
enum { gje3DimX_27 = 3 };
enum { gje3DimX_28 = 4 };
enum { gje3DimX_29 = 3 };
enum { gje3DimX_30 = 3 };
enum { gje3DimX_31 = 3 };
enum { gje3DimX_32 = 3 };
enum { gje3DimX_33 = 3 };
enum { gje3DimX_34 = 4 };
enum { gje3DimX_35 = 3 };
enum { gje3DimX_36 = 4 };
enum { gje3DimX_37 = 5 };
enum { gje3DimX_38 = 4 };
enum { gje3DimX_39 = 4 };
enum { gje3DimX_40 = 4 };
enum { gje3DimX_41 = 6 };
enum { gje3DimX_42 = 6 };
enum { gje3DimX_43 = 5 };
enum { gje3DimX_44 = 4 };
enum { gje3DimX_45 = 7 };
enum { gje3DimX_46 = 6 };
enum { gje3DimX_47 = 8 };
enum { gje3DimX_48 = 8 };
enum { gje3DimX_49 = 8 };
enum { gje3DimX_50 = 4 };
enum { gje3DimX_51 = 5 };
enum { gje3DimX_52 = 4 };
enum { gje3DimX_53 = 5 };
enum { gje3DimX_54 = 6 };
enum { gje3DimX_55 = 7 };
enum { gje3DimX_56 = 9 };
enum { gje3DimX_57 = 9 };
enum { gje3DimX_58 = 10 };
enum { gje3DimX_59 = 7 };
enum { gje3DimX_60 = 8 };
enum { gje3DimX_61 = 7 };
enum { gje3DimX_62 = 7 };
enum { gje3DimX_63 = 7 };
enum { gje3DimX_64 = 8 };
enum { gje3DimX_65 = 8 };
enum { gje3DimX_66 = 8 };
enum { gje3DimX_67 = 8 };
enum { gje3DimX_68 = 8 };
enum { gje3DimX_69 = 5 };
enum { gje3DimX_70 = 6 };
enum { gje3DimX_71 = 7 };
enum { gje3DimX_72 = 9 };
enum { gje3DimX_73 = 9 };
enum { gje3DimX_74 = 6 };
enum { gje3DimX_75 = 7 };
enum { gje3DimX_76 = 7 };
enum { gje3DimX_77 = 7 };
enum { gje3DimX_78 = -1 };
enum { gje3DimX_79 = -1 };
enum { gje3DimX_80 = -1 };
enum { gje3DimX_81 = -1 };
enum { gje3DimX_82 = -1 };
enum { gje3DimX_83 = -1 };
enum { gje3DimX_84 = -1 };
enum { gje3DimX_85 = -1 };
enum { gje3DimX_86 = -1 };
enum { gje3DimX_87 = -1 };
enum { gje3DimX_88 = -1 };
enum { gje3DimX_89 = -1 };
enum { gje3DimX_90 = -1 };
enum { gje3DimX_91 = -1 };
enum { gje3DimX_92 = -1 };
enum { gje3DimX_93 = -1 };
enum { gje3DimX_94 = -1 };
enum { gje3DimX_95 = -1 };
enum { gje3DimX_96 = -1 };
enum { gje3DimX_97 = -1 };
enum { gje3DimX_98 = -1 };
enum { gje3DimX_99 = -1 };
enum { gje3DimX_100 = -1 };
enum { gje3DimX_101 = -1 };
enum { gje3DimX_102 = -1 };
enum { gje3DimX_103 = -1 };
enum { gje3DimX_104 = -1 };
enum { gje3DimX_105 = -1 };
enum { gje3DimX_106 = -1 };
enum { gje3DimX_107 = -1 };
enum { gje3DimX_108 = -1 };
enum { gje3DimX_109 = -1 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 0 };
enum { gje3Pad_03 = 0 };
enum { gje3Pad_04 = 0 };
enum { gje3Pad_05 = 0 };
enum { gje3Pad_06 = 0 };
enum { gje3Pad_07 = 0 };
enum { gje3Pad_08 = 4 };
enum { gje3Pad_09 = 4 };
enum { gje3Pad_10 = 0 };
enum { gje3Pad_11 = 0 };
enum { gje3Pad_12 = 0 };
enum { gje3Pad_13 = 0 };
enum { gje3Pad_14 = 0 };
enum { gje3Pad_15 = 4 };
enum { gje3Pad_16 = 4 };
enum { gje3Pad_17 = 2 };
enum { gje3Pad_18 = 1 };
enum { gje3Pad_19 = 0 };
enum { gje3Pad_20 = 0 };
enum { gje3Pad_21 = 0 };
enum { gje3Pad_22 = 0 };
enum { gje3Pad_23 = 0 };
enum { gje3Pad_24 = 4 };
enum { gje3Pad_25 = 0 };
enum { gje3Pad_26 = 0 };
enum { gje3Pad_27 = 0 };
enum { gje3Pad_28 = 0 };
enum { gje3Pad_29 = 1 };
enum { gje3Pad_30 = 0 };
enum { gje3Pad_31 = 4 };
enum { gje3Pad_32 = 3 };
enum { gje3Pad_33 = 2 };
enum { gje3Pad_34 = 2 };
enum { gje3Pad_35 = 0 };
enum { gje3Pad_36 = 0 };
enum { gje3Pad_37 = 0 };
enum { gje3Pad_38 = 0 };
enum { gje3Pad_39 = 0 };
enum { gje3Pad_40 = 4 };
enum { gje3Pad_41 = 2 };
enum { gje3Pad_42 = 0 };
enum { gje3Pad_43 = 0 };
enum { gje3Pad_44 = 0 };
enum { gje3Pad_45 = 0 };
enum { gje3Pad_46 = 0 };
enum { gje3Pad_47 = 0 };
enum { gje3Pad_48 = 1 };
enum { gje3Pad_49 = 0 };
enum { gje3Pad_50 = 2 };
enum { gje3Pad_51 = 2 };
enum { gje3Pad_52 = 0 };
enum { gje3Pad_53 = 0 };
enum { gje3Pad_54 = 0 };
enum { gje3Pad_55 = 0 };
enum { gje3Pad_56 = 1 };
enum { gje3Pad_57 = 0 };
enum { gje3Pad_58 = 0 };
enum { gje3Pad_59 = 0 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 0 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 0 };
enum { gje3Pad_64 = 2 };
enum { gje3Pad_65 = 0 };
enum { gje3Pad_66 = 0 };
enum { gje3Pad_67 = 0 };
enum { gje3Pad_68 = 4 };
enum { gje3Pad_69 = 0 };
enum { gje3Pad_70 = 0 };
enum { gje3Pad_71 = 0 };
enum { gje3Pad_72 = 1 };
enum { gje3Pad_73 = 0 };
enum { gje3Pad_74 = 0 };
enum { gje3Pad_75 = 0 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 0 };
enum { gje3Pad_79 = 0 };
enum { gje3Pad_80 = 0 };
enum { gje3Pad_81 = 0 };
enum { gje3Pad_82 = 0 };
enum { gje3Pad_83 = 0 };
enum { gje3Pad_84 = 0 };
enum { gje3Pad_85 = 0 };
enum { gje3Pad_86 = 0 };
enum { gje3Pad_87 = 0 };
enum { gje3Pad_88 = 0 };
enum { gje3Pad_89 = 0 };
enum { gje3Pad_90 = 0 };
enum { gje3Pad_91 = 0 };
enum { gje3Pad_92 = 0 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 0 };
enum { gje3Pad_95 = 0 };
enum { gje3Pad_96 = 0 };
enum { gje3Pad_97 = 0 };
enum { gje3Pad_98 = 0 };
enum { gje3Pad_99 = 0 };
enum { gje3Pad_100 = 0 };
enum { gje3Pad_101 = 0 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 0 };
enum { gje3Pad_104 = 0 };
enum { gje3Pad_105 = 0 };
enum { gje3Pad_106 = 0 };
enum { gje3Pad_107 = 0 };
enum { gje3Pad_108 = 0 };
enum { gje3Pad_109 = 0 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 1 };
enum { gje3SrchThrd_03 = 2 };
enum { gje3SrchThrd_04 = 2 };
enum { gje3SrchThrd_05 = 2 };
enum { gje3SrchThrd_06 = 2 };
enum { gje3SrchThrd_07 = 2 };
enum { gje3SrchThrd_08 = 2 };
enum { gje3SrchThrd_09 = 2 };
enum { gje3SrchThrd_10 = 2 };
enum { gje3SrchThrd_11 = 2 };
enum { gje3SrchThrd_12 = 3 };
enum { gje3SrchThrd_13 = 3 };
enum { gje3SrchThrd_14 = 3 };
enum { gje3SrchThrd_15 = 3 };
enum { gje3SrchThrd_16 = 3 };
enum { gje3SrchThrd_17 = 3 };
enum { gje3SrchThrd_18 = 3 };
enum { gje3SrchThrd_19 = 3 };
enum { gje3SrchThrd_20 = 3 };
enum { gje3SrchThrd_21 = 3 };
enum { gje3SrchThrd_22 = 3 };
enum { gje3SrchThrd_23 = 3 };
enum { gje3SrchThrd_24 = 3 };
enum { gje3SrchThrd_25 = 3 };
enum { gje3SrchThrd_26 = 3 };
enum { gje3SrchThrd_27 = 3 };
enum { gje3SrchThrd_28 = 3 };
enum { gje3SrchThrd_29 = 4 };
enum { gje3SrchThrd_30 = 4 };
enum { gje3SrchThrd_31 = 4 };
enum { gje3SrchThrd_32 = 4 };
enum { gje3SrchThrd_33 = 4 };
enum { gje3SrchThrd_34 = 4 };
enum { gje3SrchThrd_35 = 4 };
enum { gje3SrchThrd_36 = 4 };
enum { gje3SrchThrd_37 = 4 };
enum { gje3SrchThrd_38 = 4 };
enum { gje3SrchThrd_39 = 4 };
enum { gje3SrchThrd_40 = 4 };
enum { gje3SrchThrd_41 = 4 };
enum { gje3SrchThrd_42 = 4 };
enum { gje3SrchThrd_43 = 4 };
enum { gje3SrchThrd_44 = 4 };
enum { gje3SrchThrd_45 = 4 };
enum { gje3SrchThrd_46 = 4 };
enum { gje3SrchThrd_47 = 4 };
enum { gje3SrchThrd_48 = 4 };
enum { gje3SrchThrd_49 = 4 };
enum { gje3SrchThrd_50 = 4 };
enum { gje3SrchThrd_51 = 4 };
enum { gje3SrchThrd_52 = 4 };
enum { gje3SrchThrd_53 = 4 };
enum { gje3SrchThrd_54 = 5 };
enum { gje3SrchThrd_55 = 6 };
enum { gje3SrchThrd_56 = 6 };
enum { gje3SrchThrd_57 = 6 };
enum { gje3SrchThrd_58 = 6 };
enum { gje3SrchThrd_59 = 6 };
enum { gje3SrchThrd_60 = 6 };
enum { gje3SrchThrd_61 = 6 };
enum { gje3SrchThrd_62 = 6 };
enum { gje3SrchThrd_63 = 6 };
enum { gje3SrchThrd_64 = 6 };
enum { gje3SrchThrd_65 = 6 };
enum { gje3SrchThrd_66 = 6 };
enum { gje3SrchThrd_67 = 6 };
enum { gje3SrchThrd_68 = 6 };
enum { gje3SrchThrd_69 = 6 };
enum { gje3SrchThrd_70 = 6 };
enum { gje3SrchThrd_71 = 6 };
enum { gje3SrchThrd_72 = 6 };
enum { gje3SrchThrd_73 = 6 };
enum { gje3SrchThrd_74 = 6 };
enum { gje3SrchThrd_75 = 6 };
enum { gje3SrchThrd_76 = 6 };
enum { gje3SrchThrd_77 = 6 };
enum { gje3SrchThrd_78 = -1 };
enum { gje3SrchThrd_79 = -1 };
enum { gje3SrchThrd_80 = -1 };
enum { gje3SrchThrd_81 = -1 };
enum { gje3SrchThrd_82 = -1 };
enum { gje3SrchThrd_83 = -1 };
enum { gje3SrchThrd_84 = -1 };
enum { gje3SrchThrd_85 = -1 };
enum { gje3SrchThrd_86 = -1 };
enum { gje3SrchThrd_87 = -1 };
enum { gje3SrchThrd_88 = -1 };
enum { gje3SrchThrd_89 = -1 };
enum { gje3SrchThrd_90 = -1 };
enum { gje3SrchThrd_91 = -1 };
enum { gje3SrchThrd_92 = -1 };
enum { gje3SrchThrd_93 = -1 };
enum { gje3SrchThrd_94 = -1 };
enum { gje3SrchThrd_95 = -1 };
enum { gje3SrchThrd_96 = -1 };
enum { gje3SrchThrd_97 = -1 };
enum { gje3SrchThrd_98 = -1 };
enum { gje3SrchThrd_99 = -1 };
enum { gje3SrchThrd_100 = -1 };
enum { gje3SrchThrd_101 = -1 };
enum { gje3SrchThrd_102 = -1 };
enum { gje3SrchThrd_103 = -1 };
enum { gje3SrchThrd_104 = -1 };
enum { gje3SrchThrd_105 = -1 };
enum { gje3SrchThrd_106 = -1 };
enum { gje3SrchThrd_107 = -1 };
enum { gje3SrchThrd_108 = -1 };
enum { gje3SrchThrd_109 = -1 };
enum { matInv2x2MinBatch = 1800 };
enum { matInv3x3MinBatch = 1400 };
enum { matInv4x4MinBatch = 1300 };
enum { matInv5x5MinBatch = 1200 };
enum { matInv6x6MinBatch = 1200 };
enum { matInv7x7MinBatch = 1200 };
enum { matInv8x8MinBatch = 0x7fffffff };
enum { matInv9x9MinBatch = 0x7fffffff };
enum { matInv10x10MinBatch= 0x7fffffff };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 7 };
};
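/* Interpretation of the enum families in these config<> specializations (the
   file itself does not document them, so treat this as a best-effort reading):
   gje3DimX_NN appears to select the blockDim.x used by the Gauss-Jordan (gje3)
   inversion kernel for NN x NN matrices, gje3Pad_NN the extra shared-memory
   row padding used to avoid bank conflicts at that size, and gje3SrchThrd_NN
   the number of threads cooperating in the pivot search; -1 marks dimensions
   the gje3 kernel does not handle for this type/architecture combination.
   matInvNxNMinBatch and matInvMinDim/MaxDim presumably gate the
   dimension-specialized inversion kernels (0x7fffffff meaning "never").
   The values are read as compile-time constants, e.g. a hypothetical use:
       enum { srchThrd9 = config<double,ARCH_SM20>::gje3SrchThrd_09 };
*/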
template<> class config<hipComplex,ARCH_SM20> {
public:
typedef float absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim = 77 };
enum { gje3MinBlks = 1 };
enum { gje3MaxThrds = 1408 }; /* sm_2x, 23 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 5 };
enum { gje3DimX_06 = 5 };
enum { gje3DimX_07 = 4 };
enum { gje3DimX_08 = 4 };
enum { gje3DimX_09 = 3 };
enum { gje3DimX_10 = 6 };
enum { gje3DimX_11 = 5 };
enum { gje3DimX_12 = 4 };
enum { gje3DimX_13 = 5 };
enum { gje3DimX_14 = 4 };
enum { gje3DimX_15 = 4 };
enum { gje3DimX_16 = 4 };
enum { gje3DimX_17 = 3 };
enum { gje3DimX_18 = 3 };
enum { gje3DimX_19 = 3 };
enum { gje3DimX_20 = 4 };
enum { gje3DimX_21 = 3 };
enum { gje3DimX_22 = 4 };
enum { gje3DimX_23 = 4 };
enum { gje3DimX_24 = 4 };
enum { gje3DimX_25 = 5 };
enum { gje3DimX_26 = 2 };
enum { gje3DimX_27 = 3 };
enum { gje3DimX_28 = 4 };
enum { gje3DimX_29 = 3 };
enum { gje3DimX_30 = 3 };
enum { gje3DimX_31 = 3 };
enum { gje3DimX_32 = 3 };
enum { gje3DimX_33 = 3 };
enum { gje3DimX_34 = 4 };
enum { gje3DimX_35 = 3 };
enum { gje3DimX_36 = 4 };
enum { gje3DimX_37 = 5 };
enum { gje3DimX_38 = 4 };
enum { gje3DimX_39 = 5 };
enum { gje3DimX_40 = 4 };
enum { gje3DimX_41 = 6 };
enum { gje3DimX_42 = 6 };
enum { gje3DimX_43 = 5 };
enum { gje3DimX_44 = 4 };
enum { gje3DimX_45 = 5 };
enum { gje3DimX_46 = 6 };
enum { gje3DimX_47 = 8 };
enum { gje3DimX_48 = 8 };
enum { gje3DimX_49 = 7 };
enum { gje3DimX_50 = 4 };
enum { gje3DimX_51 = 5 };
enum { gje3DimX_52 = 4 };
enum { gje3DimX_53 = 5 };
enum { gje3DimX_54 = 6 };
enum { gje3DimX_55 = 7 };
enum { gje3DimX_56 = 9 };
enum { gje3DimX_57 = 9 };
enum { gje3DimX_58 = 10 };
enum { gje3DimX_59 = 7 };
enum { gje3DimX_60 = 8 };
enum { gje3DimX_61 = 7 };
enum { gje3DimX_62 = 7 };
enum { gje3DimX_63 = 7 };
enum { gje3DimX_64 = 8 };
enum { gje3DimX_65 = 8 };
enum { gje3DimX_66 = 8 };
enum { gje3DimX_67 = 8 };
enum { gje3DimX_68 = 7 };
enum { gje3DimX_69 = 7 };
enum { gje3DimX_70 = 6 };
enum { gje3DimX_71 = 7 };
enum { gje3DimX_72 = 9 };
enum { gje3DimX_73 = 7 };
enum { gje3DimX_74 = 6 };
enum { gje3DimX_75 = 7 };
enum { gje3DimX_76 = 4 };
enum { gje3DimX_77 = 7 };
enum { gje3DimX_78 = -1 };
enum { gje3DimX_79 = -1 };
enum { gje3DimX_80 = -1 };
enum { gje3DimX_81 = -1 };
enum { gje3DimX_82 = -1 };
enum { gje3DimX_83 = -1 };
enum { gje3DimX_84 = -1 };
enum { gje3DimX_85 = -1 };
enum { gje3DimX_86 = -1 };
enum { gje3DimX_87 = -1 };
enum { gje3DimX_88 = -1 };
enum { gje3DimX_89 = -1 };
enum { gje3DimX_90 = -1 };
enum { gje3DimX_91 = -1 };
enum { gje3DimX_92 = -1 };
enum { gje3DimX_93 = -1 };
enum { gje3DimX_94 = -1 };
enum { gje3DimX_95 = -1 };
enum { gje3DimX_96 = -1 };
enum { gje3DimX_97 = -1 };
enum { gje3DimX_98 = -1 };
enum { gje3DimX_99 = -1 };
enum { gje3DimX_100 = -1 };
enum { gje3DimX_101 = -1 };
enum { gje3DimX_102 = -1 };
enum { gje3DimX_103 = -1 };
enum { gje3DimX_104 = -1 };
enum { gje3DimX_105 = -1 };
enum { gje3DimX_106 = -1 };
enum { gje3DimX_107 = -1 };
enum { gje3DimX_108 = -1 };
enum { gje3DimX_109 = -1 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 0 };
enum { gje3Pad_03 = 0 };
enum { gje3Pad_04 = 0 };
enum { gje3Pad_05 = 0 };
enum { gje3Pad_06 = 0 };
enum { gje3Pad_07 = 5 };
enum { gje3Pad_08 = 4 };
enum { gje3Pad_09 = 0 };
enum { gje3Pad_10 = 0 };
enum { gje3Pad_11 = 0 };
enum { gje3Pad_12 = 0 };
enum { gje3Pad_13 = 0 };
enum { gje3Pad_14 = 5 };
enum { gje3Pad_15 = 5 };
enum { gje3Pad_16 = 4 };
enum { gje3Pad_17 = 2 };
enum { gje3Pad_18 = 1 };
enum { gje3Pad_19 = 0 };
enum { gje3Pad_20 = 0 };
enum { gje3Pad_21 = 0 };
enum { gje3Pad_22 = 4 };
enum { gje3Pad_23 = 5 };
enum { gje3Pad_24 = 4 };
enum { gje3Pad_25 = 0 };
enum { gje3Pad_26 = 0 };
enum { gje3Pad_27 = 0 };
enum { gje3Pad_28 = 0 };
enum { gje3Pad_29 = 1 };
enum { gje3Pad_30 = 0 };
enum { gje3Pad_31 = 4 };
enum { gje3Pad_32 = 3 };
enum { gje3Pad_33 = 2 };
enum { gje3Pad_34 = 2 };
enum { gje3Pad_35 = 0 };
enum { gje3Pad_36 = 0 };
enum { gje3Pad_37 = 0 };
enum { gje3Pad_38 = 0 };
enum { gje3Pad_39 = 3 };
enum { gje3Pad_40 = 4 };
enum { gje3Pad_41 = 2 };
enum { gje3Pad_42 = 1 };
enum { gje3Pad_43 = 1 };
enum { gje3Pad_44 = 0 };
enum { gje3Pad_45 = 0 };
enum { gje3Pad_46 = 0 };
enum { gje3Pad_47 = 0 };
enum { gje3Pad_48 = 3 };
enum { gje3Pad_49 = 5 };
enum { gje3Pad_50 = 2 };
enum { gje3Pad_51 = 2 };
enum { gje3Pad_52 = 0 };
enum { gje3Pad_53 = 0 };
enum { gje3Pad_54 = 0 };
enum { gje3Pad_55 = 0 };
enum { gje3Pad_56 = 1 };
enum { gje3Pad_57 = 0 };
enum { gje3Pad_58 = 0 };
enum { gje3Pad_59 = 0 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 0 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 0 };
enum { gje3Pad_64 = 1 };
enum { gje3Pad_65 = 0 };
enum { gje3Pad_66 = 0 };
enum { gje3Pad_67 = 5 };
enum { gje3Pad_68 = 3 };
enum { gje3Pad_69 = 2 };
enum { gje3Pad_70 = 0 };
enum { gje3Pad_71 = 0 };
enum { gje3Pad_72 = 1 };
enum { gje3Pad_73 = 2 };
enum { gje3Pad_74 = 0 };
enum { gje3Pad_75 = 0 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 0 };
enum { gje3Pad_79 = 0 };
enum { gje3Pad_80 = 0 };
enum { gje3Pad_81 = 0 };
enum { gje3Pad_82 = 0 };
enum { gje3Pad_83 = 0 };
enum { gje3Pad_84 = 0 };
enum { gje3Pad_85 = 0 };
enum { gje3Pad_86 = 0 };
enum { gje3Pad_87 = 0 };
enum { gje3Pad_88 = 0 };
enum { gje3Pad_89 = 0 };
enum { gje3Pad_90 = 0 };
enum { gje3Pad_91 = 0 };
enum { gje3Pad_92 = 0 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 0 };
enum { gje3Pad_95 = 0 };
enum { gje3Pad_96 = 0 };
enum { gje3Pad_97 = 0 };
enum { gje3Pad_98 = 0 };
enum { gje3Pad_99 = 0 };
enum { gje3Pad_100 = 0 };
enum { gje3Pad_101 = 0 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 0 };
enum { gje3Pad_104 = 0 };
enum { gje3Pad_105 = 0 };
enum { gje3Pad_106 = 0 };
enum { gje3Pad_107 = 0 };
enum { gje3Pad_108 = 0 };
enum { gje3Pad_109 = 0 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 1 };
enum { gje3SrchThrd_03 = 2 };
enum { gje3SrchThrd_04 = 2 };
enum { gje3SrchThrd_05 = 3 };
enum { gje3SrchThrd_06 = 3 };
enum { gje3SrchThrd_07 = 3 };
enum { gje3SrchThrd_08 = 3 };
enum { gje3SrchThrd_09 = 3 };
enum { gje3SrchThrd_10 = 3 };
enum { gje3SrchThrd_11 = 3 };
enum { gje3SrchThrd_12 = 3 };
enum { gje3SrchThrd_13 = 3 };
enum { gje3SrchThrd_14 = 3 };
enum { gje3SrchThrd_15 = 3 };
enum { gje3SrchThrd_16 = 3 };
enum { gje3SrchThrd_17 = 3 };
enum { gje3SrchThrd_18 = 3 };
enum { gje3SrchThrd_19 = 3 };
enum { gje3SrchThrd_20 = 3 };
enum { gje3SrchThrd_21 = 3 };
enum { gje3SrchThrd_22 = 3 };
enum { gje3SrchThrd_23 = 3 };
enum { gje3SrchThrd_24 = 3 };
enum { gje3SrchThrd_25 = 4 };
enum { gje3SrchThrd_26 = 4 };
enum { gje3SrchThrd_27 = 4 };
enum { gje3SrchThrd_28 = 4 };
enum { gje3SrchThrd_29 = 4 };
enum { gje3SrchThrd_30 = 4 };
enum { gje3SrchThrd_31 = 4 };
enum { gje3SrchThrd_32 = 4 };
enum { gje3SrchThrd_33 = 4 };
enum { gje3SrchThrd_34 = 4 };
enum { gje3SrchThrd_35 = 5 };
enum { gje3SrchThrd_36 = 5 };
enum { gje3SrchThrd_37 = 5 };
enum { gje3SrchThrd_38 = 5 };
enum { gje3SrchThrd_39 = 5 };
enum { gje3SrchThrd_40 = 5 };
enum { gje3SrchThrd_41 = 5 };
enum { gje3SrchThrd_42 = 5 };
enum { gje3SrchThrd_43 = 5 };
enum { gje3SrchThrd_44 = 5 };
enum { gje3SrchThrd_45 = 5 };
enum { gje3SrchThrd_46 = 5 };
enum { gje3SrchThrd_47 = 5 };
enum { gje3SrchThrd_48 = 5 };
enum { gje3SrchThrd_49 = 5 };
enum { gje3SrchThrd_50 = 6 };
enum { gje3SrchThrd_51 = 6 };
enum { gje3SrchThrd_52 = 6 };
enum { gje3SrchThrd_53 = 6 };
enum { gje3SrchThrd_54 = 6 };
enum { gje3SrchThrd_55 = 6 };
enum { gje3SrchThrd_56 = 6 };
enum { gje3SrchThrd_57 = 6 };
enum { gje3SrchThrd_58 = 6 };
enum { gje3SrchThrd_59 = 7 };
enum { gje3SrchThrd_60 = 7 };
enum { gje3SrchThrd_61 = 7 };
enum { gje3SrchThrd_62 = 7 };
enum { gje3SrchThrd_63 = 7 };
enum { gje3SrchThrd_64 = 7 };
enum { gje3SrchThrd_65 = 7 };
enum { gje3SrchThrd_66 = 7 };
enum { gje3SrchThrd_67 = 7 };
enum { gje3SrchThrd_68 = 7 };
enum { gje3SrchThrd_69 = 7 };
enum { gje3SrchThrd_70 = 7 };
enum { gje3SrchThrd_71 = 7 };
enum { gje3SrchThrd_72 = 7 };
enum { gje3SrchThrd_73 = 7 };
enum { gje3SrchThrd_74 = 7 };
enum { gje3SrchThrd_75 = 7 };
enum { gje3SrchThrd_76 = 7 };
enum { gje3SrchThrd_77 = 8 };
enum { gje3SrchThrd_78 = -1 };
enum { gje3SrchThrd_79 = -1 };
enum { gje3SrchThrd_80 = -1 };
enum { gje3SrchThrd_81 = -1 };
enum { gje3SrchThrd_82 = -1 };
enum { gje3SrchThrd_83 = -1 };
enum { gje3SrchThrd_84 = -1 };
enum { gje3SrchThrd_85 = -1 };
enum { gje3SrchThrd_86 = -1 };
enum { gje3SrchThrd_87 = -1 };
enum { gje3SrchThrd_88 = -1 };
enum { gje3SrchThrd_89 = -1 };
enum { gje3SrchThrd_90 = -1 };
enum { gje3SrchThrd_91 = -1 };
enum { gje3SrchThrd_92 = -1 };
enum { gje3SrchThrd_93 = -1 };
enum { gje3SrchThrd_94 = -1 };
enum { gje3SrchThrd_95 = -1 };
enum { gje3SrchThrd_96 = -1 };
enum { gje3SrchThrd_97 = -1 };
enum { gje3SrchThrd_98 = -1 };
enum { gje3SrchThrd_99 = -1 };
enum { gje3SrchThrd_100 = -1 };
enum { gje3SrchThrd_101 = -1 };
enum { gje3SrchThrd_102 = -1 };
enum { gje3SrchThrd_103 = -1 };
enum { gje3SrchThrd_104 = -1 };
enum { gje3SrchThrd_105 = -1 };
enum { gje3SrchThrd_106 = -1 };
enum { gje3SrchThrd_107 = -1 };
enum { gje3SrchThrd_108 = -1 };
enum { gje3SrchThrd_109 = -1 };
enum { matInv2x2MinBatch = 1700 };
enum { matInv3x3MinBatch = 1300 };
enum { matInv4x4MinBatch = 1200 };
enum { matInv5x5MinBatch = 1200 };
enum { matInv6x6MinBatch = 1000 };
enum { matInv7x7MinBatch = 1100 };
enum { matInv8x8MinBatch = 1650 };
enum { matInv9x9MinBatch = 0x7fffffff };
enum { matInv10x10MinBatch= 0x7fffffff };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 8 };
};
template<> class config<hipDoubleComplex,ARCH_SM20> {
public:
typedef double absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim = 55 };
enum { gje3MinBlks = 1 };
enum { gje3MaxThrds = 1152 }; /* sm_2x, 28 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 5 };
enum { gje3DimX_06 = 5 };
enum { gje3DimX_07 = 4 };
enum { gje3DimX_08 = 8 };
enum { gje3DimX_09 = 3 };
enum { gje3DimX_10 = 6 };
enum { gje3DimX_11 = 6 };
enum { gje3DimX_12 = 4 };
enum { gje3DimX_13 = 5 };
enum { gje3DimX_14 = 4 };
enum { gje3DimX_15 = 2 };
enum { gje3DimX_16 = 4 };
enum { gje3DimX_17 = 3 };
enum { gje3DimX_18 = 4 };
enum { gje3DimX_19 = 3 };
enum { gje3DimX_20 = 4 };
enum { gje3DimX_21 = 3 };
enum { gje3DimX_22 = 4 };
enum { gje3DimX_23 = 4 };
enum { gje3DimX_24 = 8 };
enum { gje3DimX_25 = 5 };
enum { gje3DimX_26 = 4 };
enum { gje3DimX_27 = 3 };
enum { gje3DimX_28 = 8 };
enum { gje3DimX_29 = 5 };
enum { gje3DimX_30 = 6 };
enum { gje3DimX_31 = 7 };
enum { gje3DimX_32 = 8 };
enum { gje3DimX_33 = 8 };
enum { gje3DimX_34 = 8 };
enum { gje3DimX_35 = 8 };
enum { gje3DimX_36 = 8 };
enum { gje3DimX_37 = 5 };
enum { gje3DimX_38 = 6 };
enum { gje3DimX_39 = 8 };
enum { gje3DimX_40 = 8 };
enum { gje3DimX_41 = 8 };
enum { gje3DimX_42 = 8 };
enum { gje3DimX_43 = 8 };
enum { gje3DimX_44 = 8 };
enum { gje3DimX_45 = 8 };
enum { gje3DimX_46 = 8 };
enum { gje3DimX_47 = 8 };
enum { gje3DimX_48 = 8 };
enum { gje3DimX_49 = 8 };
enum { gje3DimX_50 = 8 };
enum { gje3DimX_51 = 8 };
enum { gje3DimX_52 = 8 };
enum { gje3DimX_53 = 8 };
enum { gje3DimX_54 = 6 };
enum { gje3DimX_55 = 8 };
enum { gje3DimX_56 = -1 };
enum { gje3DimX_57 = -1 };
enum { gje3DimX_58 = -1 };
enum { gje3DimX_59 = -1 };
enum { gje3DimX_60 = -1 };
enum { gje3DimX_61 = -1 };
enum { gje3DimX_62 = -1 };
enum { gje3DimX_63 = -1 };
enum { gje3DimX_64 = -1 };
enum { gje3DimX_65 = -1 };
enum { gje3DimX_66 = -1 };
enum { gje3DimX_67 = -1 };
enum { gje3DimX_68 = -1 };
enum { gje3DimX_69 = -1 };
enum { gje3DimX_70 = -1 };
enum { gje3DimX_71 = -1 };
enum { gje3DimX_72 = -1 };
enum { gje3DimX_73 = -1 };
enum { gje3DimX_74 = -1 };
enum { gje3DimX_75 = -1 };
enum { gje3DimX_76 = -1 };
enum { gje3DimX_77 = -1 };
enum { gje3DimX_78 = -1 };
enum { gje3DimX_79 = -1 };
enum { gje3DimX_80 = -1 };
enum { gje3DimX_81 = -1 };
enum { gje3DimX_82 = -1 };
enum { gje3DimX_83 = -1 };
enum { gje3DimX_84 = -1 };
enum { gje3DimX_85 = -1 };
enum { gje3DimX_86 = -1 };
enum { gje3DimX_87 = -1 };
enum { gje3DimX_88 = -1 };
enum { gje3DimX_89 = -1 };
enum { gje3DimX_90 = -1 };
enum { gje3DimX_91 = -1 };
enum { gje3DimX_92 = -1 };
enum { gje3DimX_93 = -1 };
enum { gje3DimX_94 = -1 };
enum { gje3DimX_95 = -1 };
enum { gje3DimX_96 = -1 };
enum { gje3DimX_97 = -1 };
enum { gje3DimX_98 = -1 };
enum { gje3DimX_99 = -1 };
enum { gje3DimX_100 = -1 };
enum { gje3DimX_101 = -1 };
enum { gje3DimX_102 = -1 };
enum { gje3DimX_103 = -1 };
enum { gje3DimX_104 = -1 };
enum { gje3DimX_105 = -1 };
enum { gje3DimX_106 = -1 };
enum { gje3DimX_107 = -1 };
enum { gje3DimX_108 = -1 };
enum { gje3DimX_109 = -1 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 0 };
enum { gje3Pad_03 = 0 };
enum { gje3Pad_04 = 0 };
enum { gje3Pad_05 = 0 };
enum { gje3Pad_06 = 0 };
enum { gje3Pad_07 = 4 };
enum { gje3Pad_08 = 2 };
enum { gje3Pad_09 = 2 };
enum { gje3Pad_10 = 4 };
enum { gje3Pad_11 = 3 };
enum { gje3Pad_12 = 2 };
enum { gje3Pad_13 = 0 };
enum { gje3Pad_14 = 0 };
enum { gje3Pad_15 = 0 };
enum { gje3Pad_16 = 2 };
enum { gje3Pad_17 = 2 };
enum { gje3Pad_18 = 0 };
enum { gje3Pad_19 = 0 };
enum { gje3Pad_20 = 0 };
enum { gje3Pad_21 = 0 };
enum { gje3Pad_22 = 0 };
enum { gje3Pad_23 = 0 };
enum { gje3Pad_24 = 1 };
enum { gje3Pad_25 = 4 };
enum { gje3Pad_26 = 0 };
enum { gje3Pad_27 = 0 };
enum { gje3Pad_28 = 0 };
enum { gje3Pad_29 = 0 };
enum { gje3Pad_30 = 0 };
enum { gje3Pad_31 = 0 };
enum { gje3Pad_32 = 1 };
enum { gje3Pad_33 = 0 };
enum { gje3Pad_34 = 0 };
enum { gje3Pad_35 = 0 };
enum { gje3Pad_36 = 0 };
enum { gje3Pad_37 = 0 };
enum { gje3Pad_38 = 0 };
enum { gje3Pad_39 = 0 };
enum { gje3Pad_40 = 1 };
enum { gje3Pad_41 = 0 };
enum { gje3Pad_42 = 0 };
enum { gje3Pad_43 = 0 };
enum { gje3Pad_44 = 0 };
enum { gje3Pad_45 = 0 };
enum { gje3Pad_46 = 0 };
enum { gje3Pad_47 = 0 };
enum { gje3Pad_48 = 1 };
enum { gje3Pad_49 = 0 };
enum { gje3Pad_50 = 0 };
enum { gje3Pad_51 = 0 };
enum { gje3Pad_52 = 0 };
enum { gje3Pad_53 = 0 };
enum { gje3Pad_54 = 0 };
enum { gje3Pad_55 = 0 };
enum { gje3Pad_56 = 0 };
enum { gje3Pad_57 = 0 };
enum { gje3Pad_58 = 0 };
enum { gje3Pad_59 = 0 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 0 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 0 };
enum { gje3Pad_64 = 0 };
enum { gje3Pad_65 = 0 };
enum { gje3Pad_66 = 0 };
enum { gje3Pad_67 = 0 };
enum { gje3Pad_68 = 0 };
enum { gje3Pad_69 = 0 };
enum { gje3Pad_70 = 0 };
enum { gje3Pad_71 = 0 };
enum { gje3Pad_72 = 0 };
enum { gje3Pad_73 = 0 };
enum { gje3Pad_74 = 0 };
enum { gje3Pad_75 = 0 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 0 };
enum { gje3Pad_79 = 0 };
enum { gje3Pad_80 = 0 };
enum { gje3Pad_81 = 0 };
enum { gje3Pad_82 = 0 };
enum { gje3Pad_83 = 0 };
enum { gje3Pad_84 = 0 };
enum { gje3Pad_85 = 0 };
enum { gje3Pad_86 = 0 };
enum { gje3Pad_87 = 0 };
enum { gje3Pad_88 = 0 };
enum { gje3Pad_89 = 0 };
enum { gje3Pad_90 = 0 };
enum { gje3Pad_91 = 0 };
enum { gje3Pad_92 = 0 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 0 };
enum { gje3Pad_95 = 0 };
enum { gje3Pad_96 = 0 };
enum { gje3Pad_97 = 0 };
enum { gje3Pad_98 = 0 };
enum { gje3Pad_99 = 0 };
enum { gje3Pad_100 = 0 };
enum { gje3Pad_101 = 0 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 0 };
enum { gje3Pad_104 = 0 };
enum { gje3Pad_105 = 0 };
enum { gje3Pad_106 = 0 };
enum { gje3Pad_107 = 0 };
enum { gje3Pad_108 = 0 };
enum { gje3Pad_109 = 0 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 1 };
enum { gje3SrchThrd_03 = 2 };
enum { gje3SrchThrd_04 = 2 };
enum { gje3SrchThrd_05 = 2 };
enum { gje3SrchThrd_06 = 2 };
enum { gje3SrchThrd_07 = 2 };
enum { gje3SrchThrd_08 = 3 };
enum { gje3SrchThrd_09 = 3 };
enum { gje3SrchThrd_10 = 3 };
enum { gje3SrchThrd_11 = 3 };
enum { gje3SrchThrd_12 = 3 };
enum { gje3SrchThrd_13 = 3 };
enum { gje3SrchThrd_14 = 3 };
enum { gje3SrchThrd_15 = 3 };
enum { gje3SrchThrd_16 = 3 };
enum { gje3SrchThrd_17 = 3 };
enum { gje3SrchThrd_18 = 3 };
enum { gje3SrchThrd_19 = 3 };
enum { gje3SrchThrd_20 = 3 };
enum { gje3SrchThrd_21 = 4 };
enum { gje3SrchThrd_22 = 4 };
enum { gje3SrchThrd_23 = 4 };
enum { gje3SrchThrd_24 = 4 };
enum { gje3SrchThrd_25 = 4 };
enum { gje3SrchThrd_26 = 4 };
enum { gje3SrchThrd_27 = 4 };
enum { gje3SrchThrd_28 = 4 };
enum { gje3SrchThrd_29 = 4 };
enum { gje3SrchThrd_30 = 4 };
enum { gje3SrchThrd_31 = 4 };
enum { gje3SrchThrd_32 = 4 };
enum { gje3SrchThrd_33 = 4 };
enum { gje3SrchThrd_34 = 4 };
enum { gje3SrchThrd_35 = 4 };
enum { gje3SrchThrd_36 = 4 };
enum { gje3SrchThrd_37 = 6 };
enum { gje3SrchThrd_38 = 6 };
enum { gje3SrchThrd_39 = 6 };
enum { gje3SrchThrd_40 = 6 };
enum { gje3SrchThrd_41 = 6 };
enum { gje3SrchThrd_42 = 6 };
enum { gje3SrchThrd_43 = 6 };
enum { gje3SrchThrd_44 = 6 };
enum { gje3SrchThrd_45 = 6 };
enum { gje3SrchThrd_46 = 7 };
enum { gje3SrchThrd_47 = 7 };
enum { gje3SrchThrd_48 = 7 };
enum { gje3SrchThrd_49 = 7 };
enum { gje3SrchThrd_50 = 7 };
enum { gje3SrchThrd_51 = 7 };
enum { gje3SrchThrd_52 = 7 };
enum { gje3SrchThrd_53 = 7 };
enum { gje3SrchThrd_54 = 7 };
enum { gje3SrchThrd_55 = 7 };
enum { gje3SrchThrd_56 = -1 };
enum { gje3SrchThrd_57 = -1 };
enum { gje3SrchThrd_58 = -1 };
enum { gje3SrchThrd_59 = -1 };
enum { gje3SrchThrd_60 = -1 };
enum { gje3SrchThrd_61 = -1 };
enum { gje3SrchThrd_62 = -1 };
enum { gje3SrchThrd_63 = -1 };
enum { gje3SrchThrd_64 = -1 };
enum { gje3SrchThrd_65 = -1 };
enum { gje3SrchThrd_66 = -1 };
enum { gje3SrchThrd_67 = -1 };
enum { gje3SrchThrd_68 = -1 };
enum { gje3SrchThrd_69 = -1 };
enum { gje3SrchThrd_70 = -1 };
enum { gje3SrchThrd_71 = -1 };
enum { gje3SrchThrd_72 = -1 };
enum { gje3SrchThrd_73 = -1 };
enum { gje3SrchThrd_74 = -1 };
enum { gje3SrchThrd_75 = -1 };
enum { gje3SrchThrd_76 = -1 };
enum { gje3SrchThrd_77 = -1 };
enum { gje3SrchThrd_78 = -1 };
enum { gje3SrchThrd_79 = -1 };
enum { gje3SrchThrd_80 = -1 };
enum { gje3SrchThrd_81 = -1 };
enum { gje3SrchThrd_82 = -1 };
enum { gje3SrchThrd_83 = -1 };
enum { gje3SrchThrd_84 = -1 };
enum { gje3SrchThrd_85 = -1 };
enum { gje3SrchThrd_86 = -1 };
enum { gje3SrchThrd_87 = -1 };
enum { gje3SrchThrd_88 = -1 };
enum { gje3SrchThrd_89 = -1 };
enum { gje3SrchThrd_90 = -1 };
enum { gje3SrchThrd_91 = -1 };
enum { gje3SrchThrd_92 = -1 };
enum { gje3SrchThrd_93 = -1 };
enum { gje3SrchThrd_94 = -1 };
enum { gje3SrchThrd_95 = -1 };
enum { gje3SrchThrd_96 = -1 };
enum { gje3SrchThrd_97 = -1 };
enum { gje3SrchThrd_98 = -1 };
enum { gje3SrchThrd_99 = -1 };
enum { gje3SrchThrd_100 = -1 };
enum { gje3SrchThrd_101 = -1 };
enum { gje3SrchThrd_102 = -1 };
enum { gje3SrchThrd_103 = -1 };
enum { gje3SrchThrd_104 = -1 };
enum { gje3SrchThrd_105 = -1 };
enum { gje3SrchThrd_106 = -1 };
enum { gje3SrchThrd_107 = -1 };
enum { gje3SrchThrd_108 = -1 };
enum { gje3SrchThrd_109 = -1 };
enum { matInv2x2MinBatch = 1600 };
enum { matInv3x3MinBatch = 1300 };
enum { matInv4x4MinBatch = 1100 };
enum { matInv5x5MinBatch = 1600 };
enum { matInv6x6MinBatch = 0x7fffffff };
enum { matInv7x7MinBatch = 0x7fffffff };
enum { matInv8x8MinBatch = 0x7fffffff };
enum { matInv9x9MinBatch = 0x7fffffff };
enum { matInv10x10MinBatch= 0x7fffffff };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 5 };
};
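/* gje3MaxThrds caps the number of resident threads per SM so that the
   per-thread register budget noted in each comment is not exceeded. The
   values are consistent with the register-file sizes of the targeted
   architectures (16K 32-bit registers per SM on sm_13, 32K on sm_2x)
   divided by the per-thread register count and rounded down to a multiple
   of 64, e.g. 32768 / 23 ~= 1424 -> 1408 for the sm_2x configurations
   above, and 16384 / 16 = 1024 for the sm_13 float configuration below.
   (Interpretation based on the numbers themselves; the file does not state
   this formula.) */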
template<> class config<float,ARCH_SM13> {
public:
typedef float absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim = 62 };
enum { gje3MinBlks = 1 };
enum { gje3MaxThrds = 1024 }; /* sm_13, 16 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 5 };
enum { gje3DimX_06 = 2 };
enum { gje3DimX_07 = 4 };
enum { gje3DimX_08 = 4 };
enum { gje3DimX_09 = 3 };
enum { gje3DimX_10 = 3 };
enum { gje3DimX_11 = 4 };
enum { gje3DimX_12 = 4 };
enum { gje3DimX_13 = 2 };
enum { gje3DimX_14 = 2 };
enum { gje3DimX_15 = 2 };
enum { gje3DimX_16 = 2 };
enum { gje3DimX_17 = 2 };
enum { gje3DimX_18 = 2 };
enum { gje3DimX_19 = 3 };
enum { gje3DimX_20 = 2 };
enum { gje3DimX_21 = 3 };
enum { gje3DimX_22 = 2 };
enum { gje3DimX_23 = 2 };
enum { gje3DimX_24 = 2 };
enum { gje3DimX_25 = 3 };
enum { gje3DimX_26 = 2 };
enum { gje3DimX_27 = 3 };
enum { gje3DimX_28 = 4 };
enum { gje3DimX_29 = 2 };
enum { gje3DimX_30 = 2 };
enum { gje3DimX_31 = 3 };
enum { gje3DimX_32 = 2 };
enum { gje3DimX_33 = 3 };
enum { gje3DimX_34 = 2 };
enum { gje3DimX_35 = 5 };
enum { gje3DimX_36 = 4 };
enum { gje3DimX_37 = 5 };
enum { gje3DimX_38 = 5 };
enum { gje3DimX_39 = 3 };
enum { gje3DimX_40 = 4 };
enum { gje3DimX_41 = 3 };
enum { gje3DimX_42 = 3 };
enum { gje3DimX_43 = 5 };
enum { gje3DimX_44 = 4 };
enum { gje3DimX_45 = 7 };
enum { gje3DimX_46 = 8 };
enum { gje3DimX_47 = 8 };
enum { gje3DimX_48 = 8 };
enum { gje3DimX_49 = 7 };
enum { gje3DimX_50 = 8 };
enum { gje3DimX_51 = 5 };
enum { gje3DimX_52 = 8 };
enum { gje3DimX_53 = 5 };
enum { gje3DimX_54 = 6 };
enum { gje3DimX_55 = 7 };
enum { gje3DimX_56 = 8 };
enum { gje3DimX_57 = 5 };
enum { gje3DimX_58 = 6 };
enum { gje3DimX_59 = 7 };
enum { gje3DimX_60 = 4 };
enum { gje3DimX_61 = 7 };
enum { gje3DimX_62 = 8 };
enum { gje3DimX_63 = -1 };
enum { gje3DimX_64 = -1 };
enum { gje3DimX_65 = -1 };
enum { gje3DimX_66 = -1 };
enum { gje3DimX_67 = -1 };
enum { gje3DimX_68 = -1 };
enum { gje3DimX_69 = -1 };
enum { gje3DimX_70 = -1 };
enum { gje3DimX_71 = -1 };
enum { gje3DimX_72 = -1 };
enum { gje3DimX_73 = -1 };
enum { gje3DimX_74 = -1 };
enum { gje3DimX_75 = -1 };
enum { gje3DimX_76 = -1 };
enum { gje3DimX_77 = -1 };
enum { gje3DimX_78 = -1 };
enum { gje3DimX_79 = -1 };
enum { gje3DimX_80 = -1 };
enum { gje3DimX_81 = -1 };
enum { gje3DimX_82 = -1 };
enum { gje3DimX_83 = -1 };
enum { gje3DimX_84 = -1 };
enum { gje3DimX_85 = -1 };
enum { gje3DimX_86 = -1 };
enum { gje3DimX_87 = -1 };
enum { gje3DimX_88 = -1 };
enum { gje3DimX_89 = -1 };
enum { gje3DimX_90 = -1 };
enum { gje3DimX_91 = -1 };
enum { gje3DimX_92 = -1 };
enum { gje3DimX_93 = -1 };
enum { gje3DimX_94 = -1 };
enum { gje3DimX_95 = -1 };
enum { gje3DimX_96 = -1 };
enum { gje3DimX_97 = -1 };
enum { gje3DimX_98 = -1 };
enum { gje3DimX_99 = -1 };
enum { gje3DimX_100 = -1 };
enum { gje3DimX_101 = -1 };
enum { gje3DimX_102 = -1 };
enum { gje3DimX_103 = -1 };
enum { gje3DimX_104 = -1 };
enum { gje3DimX_105 = -1 };
enum { gje3DimX_106 = -1 };
enum { gje3DimX_107 = -1 };
enum { gje3DimX_108 = -1 };
enum { gje3DimX_109 = -1 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 0 };
enum { gje3Pad_03 = 0 };
enum { gje3Pad_04 = 0 };
enum { gje3Pad_05 = 0 };
enum { gje3Pad_06 = 1 };
enum { gje3Pad_07 = 4 };
enum { gje3Pad_08 = 4 };
enum { gje3Pad_09 = 4 };
enum { gje3Pad_10 = 0 };
enum { gje3Pad_11 = 1 };
enum { gje3Pad_12 = 0 };
enum { gje3Pad_13 = 1 };
enum { gje3Pad_14 = 4 };
enum { gje3Pad_15 = 3 };
enum { gje3Pad_16 = 2 };
enum { gje3Pad_17 = 1 };
enum { gje3Pad_18 = 0 };
enum { gje3Pad_19 = 0 };
enum { gje3Pad_20 = 2 };
enum { gje3Pad_21 = 0 };
enum { gje3Pad_22 = 0 };
enum { gje3Pad_23 = 2 };
enum { gje3Pad_24 = 2 };
enum { gje3Pad_25 = 1 };
enum { gje3Pad_26 = 0 };
enum { gje3Pad_27 = 0 };
enum { gje3Pad_28 = 0 };
enum { gje3Pad_29 = 1 };
enum { gje3Pad_30 = 0 };
enum { gje3Pad_31 = 4 };
enum { gje3Pad_32 = 2 };
enum { gje3Pad_33 = 2 };
enum { gje3Pad_34 = 0 };
enum { gje3Pad_35 = 2 };
enum { gje3Pad_36 = 0 };
enum { gje3Pad_37 = 0 };
enum { gje3Pad_38 = 0 };
enum { gje3Pad_39 = 3 };
enum { gje3Pad_40 = 4 };
enum { gje3Pad_41 = 1 };
enum { gje3Pad_42 = 3 };
enum { gje3Pad_43 = 1 };
enum { gje3Pad_44 = 0 };
enum { gje3Pad_45 = 0 };
enum { gje3Pad_46 = 0 };
enum { gje3Pad_47 = 0 };
enum { gje3Pad_48 = 3 };
enum { gje3Pad_49 = 3 };
enum { gje3Pad_50 = 0 };
enum { gje3Pad_51 = 2 };
enum { gje3Pad_52 = 4 };
enum { gje3Pad_53 = 0 };
enum { gje3Pad_54 = 0 };
enum { gje3Pad_55 = 0 };
enum { gje3Pad_56 = 1 };
enum { gje3Pad_57 = 3 };
enum { gje3Pad_58 = 0 };
enum { gje3Pad_59 = 0 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 0 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 0 };
enum { gje3Pad_64 = 0 };
enum { gje3Pad_65 = 0 };
enum { gje3Pad_66 = 0 };
enum { gje3Pad_67 = 0 };
enum { gje3Pad_68 = 0 };
enum { gje3Pad_69 = 0 };
enum { gje3Pad_70 = 0 };
enum { gje3Pad_71 = 0 };
enum { gje3Pad_72 = 0 };
enum { gje3Pad_73 = 0 };
enum { gje3Pad_74 = 0 };
enum { gje3Pad_75 = 0 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 0 };
enum { gje3Pad_79 = 0 };
enum { gje3Pad_80 = 0 };
enum { gje3Pad_81 = 0 };
enum { gje3Pad_82 = 0 };
enum { gje3Pad_83 = 0 };
enum { gje3Pad_84 = 0 };
enum { gje3Pad_85 = 0 };
enum { gje3Pad_86 = 0 };
enum { gje3Pad_87 = 0 };
enum { gje3Pad_88 = 0 };
enum { gje3Pad_89 = 0 };
enum { gje3Pad_90 = 0 };
enum { gje3Pad_91 = 0 };
enum { gje3Pad_92 = 0 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 0 };
enum { gje3Pad_95 = 0 };
enum { gje3Pad_96 = 0 };
enum { gje3Pad_97 = 0 };
enum { gje3Pad_98 = 0 };
enum { gje3Pad_99 = 0 };
enum { gje3Pad_100 = 0 };
enum { gje3Pad_101 = 0 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 0 };
enum { gje3Pad_104 = 0 };
enum { gje3Pad_105 = 0 };
enum { gje3Pad_106 = 0 };
enum { gje3Pad_107 = 0 };
enum { gje3Pad_108 = 0 };
enum { gje3Pad_109 = 0 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 1 };
enum { gje3SrchThrd_03 = 1 };
enum { gje3SrchThrd_04 = 1 };
enum { gje3SrchThrd_05 = 1 };
enum { gje3SrchThrd_06 = 1 };
enum { gje3SrchThrd_07 = 1 };
enum { gje3SrchThrd_08 = 1 };
enum { gje3SrchThrd_09 = 1 };
enum { gje3SrchThrd_10 = 1 };
enum { gje3SrchThrd_11 = 1 };
enum { gje3SrchThrd_12 = 1 };
enum { gje3SrchThrd_13 = 1 };
enum { gje3SrchThrd_14 = 1 };
enum { gje3SrchThrd_15 = 1 };
enum { gje3SrchThrd_16 = 1 };
enum { gje3SrchThrd_17 = 1 };
enum { gje3SrchThrd_18 = 1 };
enum { gje3SrchThrd_19 = 1 };
enum { gje3SrchThrd_20 = 1 };
enum { gje3SrchThrd_21 = 1 };
enum { gje3SrchThrd_22 = 1 };
enum { gje3SrchThrd_23 = 1 };
enum { gje3SrchThrd_24 = 1 };
enum { gje3SrchThrd_25 = 1 };
enum { gje3SrchThrd_26 = 1 };
enum { gje3SrchThrd_27 = 1 };
enum { gje3SrchThrd_28 = 1 };
enum { gje3SrchThrd_29 = 1 };
enum { gje3SrchThrd_30 = 1 };
enum { gje3SrchThrd_31 = 2 };
enum { gje3SrchThrd_32 = 2 };
enum { gje3SrchThrd_33 = 2 };
enum { gje3SrchThrd_34 = 2 };
enum { gje3SrchThrd_35 = 2 };
enum { gje3SrchThrd_36 = 3 };
enum { gje3SrchThrd_37 = 3 };
enum { gje3SrchThrd_38 = 3 };
enum { gje3SrchThrd_39 = 3 };
enum { gje3SrchThrd_40 = 3 };
enum { gje3SrchThrd_41 = 3 };
enum { gje3SrchThrd_42 = 3 };
enum { gje3SrchThrd_43 = 3 };
enum { gje3SrchThrd_44 = 3 };
enum { gje3SrchThrd_45 = 3 };
enum { gje3SrchThrd_46 = 3 };
enum { gje3SrchThrd_47 = 3 };
enum { gje3SrchThrd_48 = 3 };
enum { gje3SrchThrd_49 = 3 };
enum { gje3SrchThrd_50 = 3 };
enum { gje3SrchThrd_51 = 3 };
enum { gje3SrchThrd_52 = 3 };
enum { gje3SrchThrd_53 = 3 };
enum { gje3SrchThrd_54 = 3 };
enum { gje3SrchThrd_55 = 3 };
enum { gje3SrchThrd_56 = 3 };
enum { gje3SrchThrd_57 = 3 };
enum { gje3SrchThrd_58 = 3 };
enum { gje3SrchThrd_59 = 3 };
enum { gje3SrchThrd_60 = 3 };
enum { gje3SrchThrd_61 = 3 };
enum { gje3SrchThrd_62 = 3 };
enum { gje3SrchThrd_63 = -1 };
enum { gje3SrchThrd_64 = -1 };
enum { gje3SrchThrd_65 = -1 };
enum { gje3SrchThrd_66 = -1 };
enum { gje3SrchThrd_67 = -1 };
enum { gje3SrchThrd_68 = -1 };
enum { gje3SrchThrd_69 = -1 };
enum { gje3SrchThrd_70 = -1 };
enum { gje3SrchThrd_71 = -1 };
enum { gje3SrchThrd_72 = -1 };
enum { gje3SrchThrd_73 = -1 };
enum { gje3SrchThrd_74 = -1 };
enum { gje3SrchThrd_75 = -1 };
enum { gje3SrchThrd_76 = -1 };
enum { gje3SrchThrd_77 = -1 };
enum { gje3SrchThrd_78 = -1 };
enum { gje3SrchThrd_79 = -1 };
enum { gje3SrchThrd_80 = -1 };
enum { gje3SrchThrd_81 = -1 };
enum { gje3SrchThrd_82 = -1 };
enum { gje3SrchThrd_83 = -1 };
enum { gje3SrchThrd_84 = -1 };
enum { gje3SrchThrd_85 = -1 };
enum { gje3SrchThrd_86 = -1 };
enum { gje3SrchThrd_87 = -1 };
enum { gje3SrchThrd_88 = -1 };
enum { gje3SrchThrd_89 = -1 };
enum { gje3SrchThrd_90 = -1 };
enum { gje3SrchThrd_91 = -1 };
enum { gje3SrchThrd_92 = -1 };
enum { gje3SrchThrd_93 = -1 };
enum { gje3SrchThrd_94 = -1 };
enum { gje3SrchThrd_95 = -1 };
enum { gje3SrchThrd_96 = -1 };
enum { gje3SrchThrd_97 = -1 };
enum { gje3SrchThrd_98 = -1 };
enum { gje3SrchThrd_99 = -1 };
enum { gje3SrchThrd_100 = -1 };
enum { gje3SrchThrd_101 = -1 };
enum { gje3SrchThrd_102 = -1 };
enum { gje3SrchThrd_103 = -1 };
enum { gje3SrchThrd_104 = -1 };
enum { gje3SrchThrd_105 = -1 };
enum { gje3SrchThrd_106 = -1 };
enum { gje3SrchThrd_107 = -1 };
enum { gje3SrchThrd_108 = -1 };
enum { gje3SrchThrd_109 = -1 };
enum { matInv2x2MinBatch = 35000 };
enum { matInv3x3MinBatch = 45000 };
enum { matInv4x4MinBatch = 40000 };
enum { matInv5x5MinBatch = 25000 };
enum { matInv6x6MinBatch = 15000 };
enum { matInv7x7MinBatch = 11000 };
enum { matInv8x8MinBatch = 9500 };
enum { matInv9x9MinBatch = 9000 };
enum { matInv10x10MinBatch= 6000 };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 10 };
};
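/* The matInvNxNMinBatch values above are presumably the batch sizes beyond
   which the fully unrolled, dimension-specific inversion kernels are worth
   launching instead of the generic gje3 path, with 0x7fffffff effectively
   disabling the specialized path for that dimension. A minimal sketch of
   that kind of selection logic, assuming a hypothetical helper that is not
   part of this file:
       template <typename T, int arch>
       static bool preferSpecializedInverse(int n, int batch)
       {
           int minBatch = 0x7fffffff;
           switch (n) {
           case  2: minBatch = config<T,arch>::matInv2x2MinBatch;   break;
           case  3: minBatch = config<T,arch>::matInv3x3MinBatch;   break;
           case  4: minBatch = config<T,arch>::matInv4x4MinBatch;   break;
           case  5: minBatch = config<T,arch>::matInv5x5MinBatch;   break;
           case  6: minBatch = config<T,arch>::matInv6x6MinBatch;   break;
           case  7: minBatch = config<T,arch>::matInv7x7MinBatch;   break;
           case  8: minBatch = config<T,arch>::matInv8x8MinBatch;   break;
           case  9: minBatch = config<T,arch>::matInv9x9MinBatch;   break;
           case 10: minBatch = config<T,arch>::matInv10x10MinBatch; break;
           default: break;
           }
           return n >= config<T,arch>::matInvMinDim &&
                  n <= config<T,arch>::matInvMaxDim &&
                  batch >= minBatch;
       }
*/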
template<> class config<double,ARCH_SM13> {
public:
typedef double absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim = 44 };
enum { gje3MinBlks = 1 };
    enum { gje3MaxThrds = 768 }; /* sm_13, 21 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 3 };
enum { gje3DimX_06 = 2 };
enum { gje3DimX_07 = 2 };
enum { gje3DimX_08 = 2 };
enum { gje3DimX_09 = 3 };
enum { gje3DimX_10 = 3 };
enum { gje3DimX_11 = 2 };
enum { gje3DimX_12 = 4 };
enum { gje3DimX_13 = 2 };
enum { gje3DimX_14 = 2 };
enum { gje3DimX_15 = 2 };
enum { gje3DimX_16 = 2 };
enum { gje3DimX_17 = 2 };
enum { gje3DimX_18 = 2 };
enum { gje3DimX_19 = 3 };
enum { gje3DimX_20 = 4 };
enum { gje3DimX_21 = 3 };
enum { gje3DimX_22 = 4 };
enum { gje3DimX_23 = 2 };
enum { gje3DimX_24 = 2 };
enum { gje3DimX_25 = 5 };
enum { gje3DimX_26 = 4 };
enum { gje3DimX_27 = 4 };
enum { gje3DimX_28 = 4 };
enum { gje3DimX_29 = 5 };
enum { gje3DimX_30 = 4 };
enum { gje3DimX_31 = 2 };
enum { gje3DimX_32 = 8 };
enum { gje3DimX_33 = 7 };
enum { gje3DimX_34 = 7 };
enum { gje3DimX_35 = 7 };
enum { gje3DimX_36 = 8 };
enum { gje3DimX_37 = 8 };
enum { gje3DimX_38 = 8 };
enum { gje3DimX_39 = 8 };
enum { gje3DimX_40 = 8 };
enum { gje3DimX_41 = 7 };
enum { gje3DimX_42 = 6 };
enum { gje3DimX_43 = 8 };
enum { gje3DimX_44 = 8 };
enum { gje3DimX_45 = -1 };
enum { gje3DimX_46 = -1 };
enum { gje3DimX_47 = -1 };
enum { gje3DimX_48 = -1 };
enum { gje3DimX_49 = -1 };
enum { gje3DimX_50 = -1 };
enum { gje3DimX_51 = -1 };
enum { gje3DimX_52 = -1 };
enum { gje3DimX_53 = -1 };
enum { gje3DimX_54 = -1 };
enum { gje3DimX_55 = -1 };
enum { gje3DimX_56 = -1 };
enum { gje3DimX_57 = -1 };
enum { gje3DimX_58 = -1 };
enum { gje3DimX_59 = -1 };
enum { gje3DimX_60 = -1 };
enum { gje3DimX_61 = -1 };
enum { gje3DimX_62 = -1 };
enum { gje3DimX_63 = -1 };
enum { gje3DimX_64 = -1 };
enum { gje3DimX_65 = -1 };
enum { gje3DimX_66 = -1 };
enum { gje3DimX_67 = -1 };
enum { gje3DimX_68 = -1 };
enum { gje3DimX_69 = -1 };
enum { gje3DimX_70 = -1 };
enum { gje3DimX_71 = -1 };
enum { gje3DimX_72 = -1 };
enum { gje3DimX_73 = -1 };
enum { gje3DimX_74 = -1 };
enum { gje3DimX_75 = -1 };
enum { gje3DimX_76 = -1 };
enum { gje3DimX_77 = -1 };
enum { gje3DimX_78 = -1 };
enum { gje3DimX_79 = -1 };
enum { gje3DimX_80 = -1 };
enum { gje3DimX_81 = -1 };
enum { gje3DimX_82 = -1 };
enum { gje3DimX_83 = -1 };
enum { gje3DimX_84 = -1 };
enum { gje3DimX_85 = -1 };
enum { gje3DimX_86 = -1 };
enum { gje3DimX_87 = -1 };
enum { gje3DimX_88 = -1 };
enum { gje3DimX_89 = -1 };
enum { gje3DimX_90 = -1 };
enum { gje3DimX_91 = -1 };
enum { gje3DimX_92 = -1 };
enum { gje3DimX_93 = -1 };
enum { gje3DimX_94 = -1 };
enum { gje3DimX_95 = -1 };
enum { gje3DimX_96 = -1 };
enum { gje3DimX_97 = -1 };
enum { gje3DimX_98 = -1 };
enum { gje3DimX_99 = -1 };
enum { gje3DimX_100 = -1 };
enum { gje3DimX_101 = -1 };
enum { gje3DimX_102 = -1 };
enum { gje3DimX_103 = -1 };
enum { gje3DimX_104 = -1 };
enum { gje3DimX_105 = -1 };
enum { gje3DimX_106 = -1 };
enum { gje3DimX_107 = -1 };
enum { gje3DimX_108 = -1 };
enum { gje3DimX_109 = -1 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 0 };
enum { gje3Pad_03 = 0 };
enum { gje3Pad_04 = 2 };
enum { gje3Pad_05 = 0 };
enum { gje3Pad_06 = 1 };
enum { gje3Pad_07 = 4 };
enum { gje3Pad_08 = 3 };
enum { gje3Pad_09 = 2 };
enum { gje3Pad_10 = 1 };
enum { gje3Pad_11 = 2 };
enum { gje3Pad_12 = 2 };
enum { gje3Pad_13 = 2 };
enum { gje3Pad_14 = 1 };
enum { gje3Pad_15 = 0 };
enum { gje3Pad_16 = 1 };
enum { gje3Pad_17 = 0 };
enum { gje3Pad_18 = 1 };
enum { gje3Pad_19 = 2 };
enum { gje3Pad_20 = 2 };
enum { gje3Pad_21 = 0 };
enum { gje3Pad_22 = 4 };
enum { gje3Pad_23 = 2 };
enum { gje3Pad_24 = 1 };
enum { gje3Pad_25 = 4 };
enum { gje3Pad_26 = 4 };
enum { gje3Pad_27 = 3 };
enum { gje3Pad_28 = 2 };
enum { gje3Pad_29 = 0 };
enum { gje3Pad_30 = 0 };
enum { gje3Pad_31 = 0 };
enum { gje3Pad_32 = 1 };
enum { gje3Pad_33 = 2 };
enum { gje3Pad_34 = 1 };
enum { gje3Pad_35 = 4 };
enum { gje3Pad_36 = 3 };
enum { gje3Pad_37 = 1 };
enum { gje3Pad_38 = 3 };
enum { gje3Pad_39 = 2 };
enum { gje3Pad_40 = 1 };
enum { gje3Pad_41 = 2 };
enum { gje3Pad_42 = 4 };
enum { gje3Pad_43 = 2 };
enum { gje3Pad_44 = 1 };
enum { gje3Pad_45 = 0 };
enum { gje3Pad_46 = 0 };
enum { gje3Pad_47 = 0 };
enum { gje3Pad_48 = 0 };
enum { gje3Pad_49 = 0 };
enum { gje3Pad_50 = 0 };
enum { gje3Pad_51 = 0 };
enum { gje3Pad_52 = 0 };
enum { gje3Pad_53 = 0 };
enum { gje3Pad_54 = 0 };
enum { gje3Pad_55 = 0 };
enum { gje3Pad_56 = 0 };
enum { gje3Pad_57 = 0 };
enum { gje3Pad_58 = 0 };
enum { gje3Pad_59 = 0 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 0 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 0 };
enum { gje3Pad_64 = 0 };
enum { gje3Pad_65 = 0 };
enum { gje3Pad_66 = 0 };
enum { gje3Pad_67 = 0 };
enum { gje3Pad_68 = 0 };
enum { gje3Pad_69 = 0 };
enum { gje3Pad_70 = 0 };
enum { gje3Pad_71 = 0 };
enum { gje3Pad_72 = 0 };
enum { gje3Pad_73 = 0 };
enum { gje3Pad_74 = 0 };
enum { gje3Pad_75 = 0 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 0 };
enum { gje3Pad_79 = 0 };
enum { gje3Pad_80 = 0 };
enum { gje3Pad_81 = 0 };
enum { gje3Pad_82 = 0 };
enum { gje3Pad_83 = 0 };
enum { gje3Pad_84 = 0 };
enum { gje3Pad_85 = 0 };
enum { gje3Pad_86 = 0 };
enum { gje3Pad_87 = 0 };
enum { gje3Pad_88 = 0 };
enum { gje3Pad_89 = 0 };
enum { gje3Pad_90 = 0 };
enum { gje3Pad_91 = 0 };
enum { gje3Pad_92 = 0 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 0 };
enum { gje3Pad_95 = 0 };
enum { gje3Pad_96 = 0 };
enum { gje3Pad_97 = 0 };
enum { gje3Pad_98 = 0 };
enum { gje3Pad_99 = 0 };
enum { gje3Pad_100 = 0 };
enum { gje3Pad_101 = 0 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 0 };
enum { gje3Pad_104 = 0 };
enum { gje3Pad_105 = 0 };
enum { gje3Pad_106 = 0 };
enum { gje3Pad_107 = 0 };
enum { gje3Pad_108 = 0 };
enum { gje3Pad_109 = 0 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 1 };
enum { gje3SrchThrd_03 = 2 };
enum { gje3SrchThrd_04 = 2 };
enum { gje3SrchThrd_05 = 2 };
enum { gje3SrchThrd_06 = 2 };
enum { gje3SrchThrd_07 = 2 };
enum { gje3SrchThrd_08 = 2 };
enum { gje3SrchThrd_09 = 2 };
enum { gje3SrchThrd_10 = 2 };
enum { gje3SrchThrd_11 = 2 };
enum { gje3SrchThrd_12 = 2 };
enum { gje3SrchThrd_13 = 3 };
enum { gje3SrchThrd_14 = 3 };
enum { gje3SrchThrd_15 = 3 };
enum { gje3SrchThrd_16 = 3 };
enum { gje3SrchThrd_17 = 3 };
enum { gje3SrchThrd_18 = 3 };
enum { gje3SrchThrd_19 = 3 };
enum { gje3SrchThrd_20 = 3 };
enum { gje3SrchThrd_21 = 3 };
enum { gje3SrchThrd_22 = 3 };
enum { gje3SrchThrd_23 = 3 };
enum { gje3SrchThrd_24 = 3 };
enum { gje3SrchThrd_25 = 3 };
enum { gje3SrchThrd_26 = 3 };
enum { gje3SrchThrd_27 = 3 };
enum { gje3SrchThrd_28 = 3 };
enum { gje3SrchThrd_29 = 3 };
enum { gje3SrchThrd_30 = 3 };
enum { gje3SrchThrd_31 = 3 };
enum { gje3SrchThrd_32 = 3 };
enum { gje3SrchThrd_33 = 3 };
enum { gje3SrchThrd_34 = 3 };
enum { gje3SrchThrd_35 = 3 };
enum { gje3SrchThrd_36 = 4 };
enum { gje3SrchThrd_37 = 4 };
enum { gje3SrchThrd_38 = 4 };
enum { gje3SrchThrd_39 = 4 };
enum { gje3SrchThrd_40 = 4 };
enum { gje3SrchThrd_41 = 4 };
enum { gje3SrchThrd_42 = 4 };
enum { gje3SrchThrd_43 = 4 };
enum { gje3SrchThrd_44 = 4 };
enum { gje3SrchThrd_45 = -1 };
enum { gje3SrchThrd_46 = -1 };
enum { gje3SrchThrd_47 = -1 };
enum { gje3SrchThrd_48 = -1 };
enum { gje3SrchThrd_49 = -1 };
enum { gje3SrchThrd_50 = -1 };
enum { gje3SrchThrd_51 = -1 };
enum { gje3SrchThrd_52 = -1 };
enum { gje3SrchThrd_53 = -1 };
enum { gje3SrchThrd_54 = -1 };
enum { gje3SrchThrd_55 = -1 };
enum { gje3SrchThrd_56 = -1 };
enum { gje3SrchThrd_57 = -1 };
enum { gje3SrchThrd_58 = -1 };
enum { gje3SrchThrd_59 = -1 };
enum { gje3SrchThrd_60 = -1 };
enum { gje3SrchThrd_61 = -1 };
enum { gje3SrchThrd_62 = -1 };
enum { gje3SrchThrd_63 = -1 };
enum { gje3SrchThrd_64 = -1 };
enum { gje3SrchThrd_65 = -1 };
enum { gje3SrchThrd_66 = -1 };
enum { gje3SrchThrd_67 = -1 };
enum { gje3SrchThrd_68 = -1 };
enum { gje3SrchThrd_69 = -1 };
enum { gje3SrchThrd_70 = -1 };
enum { gje3SrchThrd_71 = -1 };
enum { gje3SrchThrd_72 = -1 };
enum { gje3SrchThrd_73 = -1 };
enum { gje3SrchThrd_74 = -1 };
enum { gje3SrchThrd_75 = -1 };
enum { gje3SrchThrd_76 = -1 };
enum { gje3SrchThrd_77 = -1 };
enum { gje3SrchThrd_78 = -1 };
enum { gje3SrchThrd_79 = -1 };
enum { gje3SrchThrd_80 = -1 };
enum { gje3SrchThrd_81 = -1 };
enum { gje3SrchThrd_82 = -1 };
enum { gje3SrchThrd_83 = -1 };
enum { gje3SrchThrd_84 = -1 };
enum { gje3SrchThrd_85 = -1 };
enum { gje3SrchThrd_86 = -1 };
enum { gje3SrchThrd_87 = -1 };
enum { gje3SrchThrd_88 = -1 };
enum { gje3SrchThrd_89 = -1 };
enum { gje3SrchThrd_90 = -1 };
enum { gje3SrchThrd_91 = -1 };
enum { gje3SrchThrd_92 = -1 };
enum { gje3SrchThrd_93 = -1 };
enum { gje3SrchThrd_94 = -1 };
enum { gje3SrchThrd_95 = -1 };
enum { gje3SrchThrd_96 = -1 };
enum { gje3SrchThrd_97 = -1 };
enum { gje3SrchThrd_98 = -1 };
enum { gje3SrchThrd_99 = -1 };
enum { gje3SrchThrd_100 = -1 };
enum { gje3SrchThrd_101 = -1 };
enum { gje3SrchThrd_102 = -1 };
enum { gje3SrchThrd_103 = -1 };
enum { gje3SrchThrd_104 = -1 };
enum { gje3SrchThrd_105 = -1 };
enum { gje3SrchThrd_106 = -1 };
enum { gje3SrchThrd_107 = -1 };
enum { gje3SrchThrd_108 = -1 };
enum { gje3SrchThrd_109 = -1 };
enum { matInv2x2MinBatch = 40000 };
enum { matInv3x3MinBatch = 28000 };
enum { matInv4x4MinBatch = 17000 };
enum { matInv5x5MinBatch = 14000 };
enum { matInv6x6MinBatch = 11000 };
enum { matInv7x7MinBatch = 8500 };
enum { matInv8x8MinBatch = 13000 };
enum { matInv9x9MinBatch = 17000 };
enum { matInv10x10MinBatch= 30000 };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 10 };
};
template<> class config<hipComplex,ARCH_SM13> {
public:
typedef float absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim = 44 };
enum { gje3MinBlks = 1 };
    enum { gje3MaxThrds = 832 }; /* sm_13, 19 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 3 };
enum { gje3DimX_06 = 2 };
enum { gje3DimX_07 = 2 };
enum { gje3DimX_08 = 2 };
enum { gje3DimX_09 = 3 };
enum { gje3DimX_10 = 3 };
enum { gje3DimX_11 = 2 };
enum { gje3DimX_12 = 4 };
enum { gje3DimX_13 = 2 };
enum { gje3DimX_14 = 2 };
enum { gje3DimX_15 = 2 };
enum { gje3DimX_16 = 2 };
enum { gje3DimX_17 = 2 };
enum { gje3DimX_18 = 2 };
enum { gje3DimX_19 = 2 };
enum { gje3DimX_20 = 4 };
enum { gje3DimX_21 = 2 };
enum { gje3DimX_22 = 8 };
enum { gje3DimX_23 = 2 };
enum { gje3DimX_24 = 8 };
enum { gje3DimX_25 = 8 };
enum { gje3DimX_26 = 8 };
enum { gje3DimX_27 = 7 };
enum { gje3DimX_28 = 8 };
enum { gje3DimX_29 = 8 };
enum { gje3DimX_30 = 8 };
enum { gje3DimX_31 = 8 };
enum { gje3DimX_32 = 8 };
enum { gje3DimX_33 = 8 };
enum { gje3DimX_34 = 8 };
enum { gje3DimX_35 = 8 };
enum { gje3DimX_36 = 8 };
enum { gje3DimX_37 = 8 };
enum { gje3DimX_38 = 8 };
enum { gje3DimX_39 = 8 };
enum { gje3DimX_40 = 8 };
enum { gje3DimX_41 = 8 };
enum { gje3DimX_42 = 8 };
enum { gje3DimX_43 = 8 };
enum { gje3DimX_44 = 8 };
enum { gje3DimX_45 = -1 };
enum { gje3DimX_46 = -1 };
enum { gje3DimX_47 = -1 };
enum { gje3DimX_48 = -1 };
enum { gje3DimX_49 = -1 };
enum { gje3DimX_50 = -1 };
enum { gje3DimX_51 = -1 };
enum { gje3DimX_52 = -1 };
enum { gje3DimX_53 = -1 };
enum { gje3DimX_54 = -1 };
enum { gje3DimX_55 = -1 };
enum { gje3DimX_56 = -1 };
enum { gje3DimX_57 = -1 };
enum { gje3DimX_58 = -1 };
enum { gje3DimX_59 = -1 };
enum { gje3DimX_60 = -1 };
enum { gje3DimX_61 = -1 };
enum { gje3DimX_62 = -1 };
enum { gje3DimX_63 = -1 };
enum { gje3DimX_64 = -1 };
enum { gje3DimX_65 = -1 };
enum { gje3DimX_66 = -1 };
enum { gje3DimX_67 = -1 };
enum { gje3DimX_68 = -1 };
enum { gje3DimX_69 = -1 };
enum { gje3DimX_70 = -1 };
enum { gje3DimX_71 = -1 };
enum { gje3DimX_72 = -1 };
enum { gje3DimX_73 = -1 };
enum { gje3DimX_74 = -1 };
enum { gje3DimX_75 = -1 };
enum { gje3DimX_76 = -1 };
enum { gje3DimX_77 = -1 };
enum { gje3DimX_78 = -1 };
enum { gje3DimX_79 = -1 };
enum { gje3DimX_80 = -1 };
enum { gje3DimX_81 = -1 };
enum { gje3DimX_82 = -1 };
enum { gje3DimX_83 = -1 };
enum { gje3DimX_84 = -1 };
enum { gje3DimX_85 = -1 };
enum { gje3DimX_86 = -1 };
enum { gje3DimX_87 = -1 };
enum { gje3DimX_88 = -1 };
enum { gje3DimX_89 = -1 };
enum { gje3DimX_90 = -1 };
enum { gje3DimX_91 = -1 };
enum { gje3DimX_92 = -1 };
enum { gje3DimX_93 = -1 };
enum { gje3DimX_94 = -1 };
enum { gje3DimX_95 = -1 };
enum { gje3DimX_96 = -1 };
enum { gje3DimX_97 = -1 };
enum { gje3DimX_98 = -1 };
enum { gje3DimX_99 = -1 };
enum { gje3DimX_100 = -1 };
enum { gje3DimX_101 = -1 };
enum { gje3DimX_102 = -1 };
enum { gje3DimX_103 = -1 };
enum { gje3DimX_104 = -1 };
enum { gje3DimX_105 = -1 };
enum { gje3DimX_106 = -1 };
enum { gje3DimX_107 = -1 };
enum { gje3DimX_108 = -1 };
enum { gje3DimX_109 = -1 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 0 };
enum { gje3Pad_03 = 0 };
enum { gje3Pad_04 = 2 };
enum { gje3Pad_05 = 0 };
enum { gje3Pad_06 = 1 };
enum { gje3Pad_07 = 2 };
enum { gje3Pad_08 = 1 };
enum { gje3Pad_09 = 2 };
enum { gje3Pad_10 = 1 };
enum { gje3Pad_11 = 2 };
enum { gje3Pad_12 = 2 };
enum { gje3Pad_13 = 2 };
enum { gje3Pad_14 = 1 };
enum { gje3Pad_15 = 0 };
enum { gje3Pad_16 = 1 };
enum { gje3Pad_17 = 0 };
enum { gje3Pad_18 = 1 };
enum { gje3Pad_19 = 2 };
enum { gje3Pad_20 = 2 };
enum { gje3Pad_21 = 0 };
enum { gje3Pad_22 = 1 };
enum { gje3Pad_23 = 2 };
enum { gje3Pad_24 = 1 };
enum { gje3Pad_25 = 1 };
enum { gje3Pad_26 = 1 };
enum { gje3Pad_27 = 4 };
enum { gje3Pad_28 = 1 };
enum { gje3Pad_29 = 2 };
enum { gje3Pad_30 = 1 };
enum { gje3Pad_31 = 0 };
enum { gje3Pad_32 = 1 };
enum { gje3Pad_33 = 1 };
enum { gje3Pad_34 = 1 };
enum { gje3Pad_35 = 2 };
enum { gje3Pad_36 = 2 };
enum { gje3Pad_37 = 1 };
enum { gje3Pad_38 = 1 };
enum { gje3Pad_39 = 2 };
enum { gje3Pad_40 = 1 };
enum { gje3Pad_41 = 2 };
enum { gje3Pad_42 = 4 };
enum { gje3Pad_43 = 2 };
enum { gje3Pad_44 = 1 };
enum { gje3Pad_45 = 0 };
enum { gje3Pad_46 = 0 };
enum { gje3Pad_47 = 0 };
enum { gje3Pad_48 = 0 };
enum { gje3Pad_49 = 0 };
enum { gje3Pad_50 = 0 };
enum { gje3Pad_51 = 0 };
enum { gje3Pad_52 = 0 };
enum { gje3Pad_53 = 0 };
enum { gje3Pad_54 = 0 };
enum { gje3Pad_55 = 0 };
enum { gje3Pad_56 = 0 };
enum { gje3Pad_57 = 0 };
enum { gje3Pad_58 = 0 };
enum { gje3Pad_59 = 0 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 0 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 0 };
enum { gje3Pad_64 = 0 };
enum { gje3Pad_65 = 0 };
enum { gje3Pad_66 = 0 };
enum { gje3Pad_67 = 0 };
enum { gje3Pad_68 = 0 };
enum { gje3Pad_69 = 0 };
enum { gje3Pad_70 = 0 };
enum { gje3Pad_71 = 0 };
enum { gje3Pad_72 = 0 };
enum { gje3Pad_73 = 0 };
enum { gje3Pad_74 = 0 };
enum { gje3Pad_75 = 0 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 0 };
enum { gje3Pad_79 = 0 };
enum { gje3Pad_80 = 0 };
enum { gje3Pad_81 = 0 };
enum { gje3Pad_82 = 0 };
enum { gje3Pad_83 = 0 };
enum { gje3Pad_84 = 0 };
enum { gje3Pad_85 = 0 };
enum { gje3Pad_86 = 0 };
enum { gje3Pad_87 = 0 };
enum { gje3Pad_88 = 0 };
enum { gje3Pad_89 = 0 };
enum { gje3Pad_90 = 0 };
enum { gje3Pad_91 = 0 };
enum { gje3Pad_92 = 0 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 0 };
enum { gje3Pad_95 = 0 };
enum { gje3Pad_96 = 0 };
enum { gje3Pad_97 = 0 };
enum { gje3Pad_98 = 0 };
enum { gje3Pad_99 = 0 };
enum { gje3Pad_100 = 0 };
enum { gje3Pad_101 = 0 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 0 };
enum { gje3Pad_104 = 0 };
enum { gje3Pad_105 = 0 };
enum { gje3Pad_106 = 0 };
enum { gje3Pad_107 = 0 };
enum { gje3Pad_108 = 0 };
enum { gje3Pad_109 = 0 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 2 };
enum { gje3SrchThrd_03 = 2 };
enum { gje3SrchThrd_04 = 2 };
enum { gje3SrchThrd_05 = 2 };
enum { gje3SrchThrd_06 = 2 };
enum { gje3SrchThrd_07 = 2 };
enum { gje3SrchThrd_08 = 2 };
enum { gje3SrchThrd_09 = 2 };
enum { gje3SrchThrd_10 = 3 };
enum { gje3SrchThrd_11 = 3 };
enum { gje3SrchThrd_12 = 3 };
enum { gje3SrchThrd_13 = 3 };
enum { gje3SrchThrd_14 = 3 };
enum { gje3SrchThrd_15 = 3 };
enum { gje3SrchThrd_16 = 3 };
enum { gje3SrchThrd_17 = 3 };
enum { gje3SrchThrd_18 = 3 };
enum { gje3SrchThrd_19 = 3 };
enum { gje3SrchThrd_20 = 3 };
enum { gje3SrchThrd_21 = 3 };
enum { gje3SrchThrd_22 = 3 };
enum { gje3SrchThrd_23 = 3 };
enum { gje3SrchThrd_24 = 3 };
enum { gje3SrchThrd_25 = 3 };
enum { gje3SrchThrd_26 = 3 };
enum { gje3SrchThrd_27 = 3 };
enum { gje3SrchThrd_28 = 3 };
enum { gje3SrchThrd_29 = 3 };
enum { gje3SrchThrd_30 = 3 };
enum { gje3SrchThrd_31 = 4 };
enum { gje3SrchThrd_32 = 4 };
enum { gje3SrchThrd_33 = 4 };
enum { gje3SrchThrd_34 = 4 };
enum { gje3SrchThrd_35 = 4 };
enum { gje3SrchThrd_36 = 4 };
enum { gje3SrchThrd_37 = 4 };
enum { gje3SrchThrd_38 = 4 };
enum { gje3SrchThrd_39 = 4 };
enum { gje3SrchThrd_40 = 4 };
enum { gje3SrchThrd_41 = 4 };
enum { gje3SrchThrd_42 = 4 };
enum { gje3SrchThrd_43 = 4 };
enum { gje3SrchThrd_44 = 4 };
enum { gje3SrchThrd_45 = -1 };
enum { gje3SrchThrd_46 = -1 };
enum { gje3SrchThrd_47 = -1 };
enum { gje3SrchThrd_48 = -1 };
enum { gje3SrchThrd_49 = -1 };
enum { gje3SrchThrd_50 = -1 };
enum { gje3SrchThrd_51 = -1 };
enum { gje3SrchThrd_52 = -1 };
enum { gje3SrchThrd_53 = -1 };
enum { gje3SrchThrd_54 = -1 };
enum { gje3SrchThrd_55 = -1 };
enum { gje3SrchThrd_56 = -1 };
enum { gje3SrchThrd_57 = -1 };
enum { gje3SrchThrd_58 = -1 };
enum { gje3SrchThrd_59 = -1 };
enum { gje3SrchThrd_60 = -1 };
enum { gje3SrchThrd_61 = -1 };
enum { gje3SrchThrd_62 = -1 };
enum { gje3SrchThrd_63 = -1 };
enum { gje3SrchThrd_64 = -1 };
enum { gje3SrchThrd_65 = -1 };
enum { gje3SrchThrd_66 = -1 };
enum { gje3SrchThrd_67 = -1 };
enum { gje3SrchThrd_68 = -1 };
enum { gje3SrchThrd_69 = -1 };
enum { gje3SrchThrd_70 = -1 };
enum { gje3SrchThrd_71 = -1 };
enum { gje3SrchThrd_72 = -1 };
enum { gje3SrchThrd_73 = -1 };
enum { gje3SrchThrd_74 = -1 };
enum { gje3SrchThrd_75 = -1 };
enum { gje3SrchThrd_76 = -1 };
enum { gje3SrchThrd_77 = -1 };
enum { gje3SrchThrd_78 = -1 };
enum { gje3SrchThrd_79 = -1 };
enum { gje3SrchThrd_80 = -1 };
enum { gje3SrchThrd_81 = -1 };
enum { gje3SrchThrd_82 = -1 };
enum { gje3SrchThrd_83 = -1 };
enum { gje3SrchThrd_84 = -1 };
enum { gje3SrchThrd_85 = -1 };
enum { gje3SrchThrd_86 = -1 };
enum { gje3SrchThrd_87 = -1 };
enum { gje3SrchThrd_88 = -1 };
enum { gje3SrchThrd_89 = -1 };
enum { gje3SrchThrd_90 = -1 };
enum { gje3SrchThrd_91 = -1 };
enum { gje3SrchThrd_92 = -1 };
enum { gje3SrchThrd_93 = -1 };
enum { gje3SrchThrd_94 = -1 };
enum { gje3SrchThrd_95 = -1 };
enum { gje3SrchThrd_96 = -1 };
enum { gje3SrchThrd_97 = -1 };
enum { gje3SrchThrd_98 = -1 };
enum { gje3SrchThrd_99 = -1 };
enum { gje3SrchThrd_100 = -1 };
enum { gje3SrchThrd_101 = -1 };
enum { gje3SrchThrd_102 = -1 };
enum { gje3SrchThrd_103 = -1 };
enum { gje3SrchThrd_104 = -1 };
enum { gje3SrchThrd_105 = -1 };
enum { gje3SrchThrd_106 = -1 };
enum { gje3SrchThrd_107 = -1 };
enum { gje3SrchThrd_108 = -1 };
enum { gje3SrchThrd_109 = -1 };
enum { matInv2x2MinBatch = 35000 };
enum { matInv3x3MinBatch = 35000 };
enum { matInv4x4MinBatch = 20000 };
enum { matInv5x5MinBatch = 11000 };
enum { matInv6x6MinBatch = 9000 };
enum { matInv7x7MinBatch = 7000 };
enum { matInv8x8MinBatch = 25000 };
enum { matInv9x9MinBatch = 0x7fffffff };
enum { matInv10x10MinBatch= 0x7fffffff };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 8 };
};
template<> class config<hipDoubleComplex,ARCH_SM13> {
public:
typedef double absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim = 31 };
enum { gje3MinBlks = 1 };
enum { gje3MaxThrds = 640 }; /* sm_13, 25 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 3 };
enum { gje3DimX_06 = 2 };
enum { gje3DimX_07 = 2 };
enum { gje3DimX_08 = 2 };
enum { gje3DimX_09 = 3 };
enum { gje3DimX_10 = 3 };
enum { gje3DimX_11 = 3 };
enum { gje3DimX_12 = 4 };
enum { gje3DimX_13 = 3 };
enum { gje3DimX_14 = 3 };
enum { gje3DimX_15 = 3 };
enum { gje3DimX_16 = 4 };
enum { gje3DimX_17 = 4 };
enum { gje3DimX_18 = 4 };
enum { gje3DimX_19 = 4 };
enum { gje3DimX_20 = 4 };
enum { gje3DimX_21 = 5 };
enum { gje3DimX_22 = 5 };
enum { gje3DimX_23 = 6 };
enum { gje3DimX_24 = 6 };
enum { gje3DimX_25 = 5 };
enum { gje3DimX_26 = 6 };
enum { gje3DimX_27 = 7 };
enum { gje3DimX_28 = 4 };
enum { gje3DimX_29 = 6 };
enum { gje3DimX_30 = 8 };
enum { gje3DimX_31 = 4 };
enum { gje3DimX_32 = -1 };
enum { gje3DimX_33 = -1 };
enum { gje3DimX_34 = -1 };
enum { gje3DimX_35 = -1 };
enum { gje3DimX_36 = -1 };
enum { gje3DimX_37 = -1 };
enum { gje3DimX_38 = -1 };
enum { gje3DimX_39 = -1 };
enum { gje3DimX_40 = -1 };
enum { gje3DimX_41 = -1 };
enum { gje3DimX_42 = -1 };
enum { gje3DimX_43 = -1 };
enum { gje3DimX_44 = -1 };
enum { gje3DimX_45 = -1 };
enum { gje3DimX_46 = -1 };
enum { gje3DimX_47 = -1 };
enum { gje3DimX_48 = -1 };
enum { gje3DimX_49 = -1 };
enum { gje3DimX_50 = -1 };
enum { gje3DimX_51 = -1 };
enum { gje3DimX_52 = -1 };
enum { gje3DimX_53 = -1 };
enum { gje3DimX_54 = -1 };
enum { gje3DimX_55 = -1 };
enum { gje3DimX_56 = -1 };
enum { gje3DimX_57 = -1 };
enum { gje3DimX_58 = -1 };
enum { gje3DimX_59 = -1 };
enum { gje3DimX_60 = -1 };
enum { gje3DimX_61 = -1 };
enum { gje3DimX_62 = -1 };
enum { gje3DimX_63 = -1 };
enum { gje3DimX_64 = -1 };
enum { gje3DimX_65 = -1 };
enum { gje3DimX_66 = -1 };
enum { gje3DimX_67 = -1 };
enum { gje3DimX_68 = -1 };
enum { gje3DimX_69 = -1 };
enum { gje3DimX_70 = -1 };
enum { gje3DimX_71 = -1 };
enum { gje3DimX_72 = -1 };
enum { gje3DimX_73 = -1 };
enum { gje3DimX_74 = -1 };
enum { gje3DimX_75 = -1 };
enum { gje3DimX_76 = -1 };
enum { gje3DimX_77 = -1 };
enum { gje3DimX_78 = -1 };
enum { gje3DimX_79 = -1 };
enum { gje3DimX_80 = -1 };
enum { gje3DimX_81 = -1 };
enum { gje3DimX_82 = -1 };
enum { gje3DimX_83 = -1 };
enum { gje3DimX_84 = -1 };
enum { gje3DimX_85 = -1 };
enum { gje3DimX_86 = -1 };
enum { gje3DimX_87 = -1 };
enum { gje3DimX_88 = -1 };
enum { gje3DimX_89 = -1 };
enum { gje3DimX_90 = -1 };
enum { gje3DimX_91 = -1 };
enum { gje3DimX_92 = -1 };
enum { gje3DimX_93 = -1 };
enum { gje3DimX_94 = -1 };
enum { gje3DimX_95 = -1 };
enum { gje3DimX_96 = -1 };
enum { gje3DimX_97 = -1 };
enum { gje3DimX_98 = -1 };
enum { gje3DimX_99 = -1 };
enum { gje3DimX_100 = -1 };
enum { gje3DimX_101 = -1 };
enum { gje3DimX_102 = -1 };
enum { gje3DimX_103 = -1 };
enum { gje3DimX_104 = -1 };
enum { gje3DimX_105 = -1 };
enum { gje3DimX_106 = -1 };
enum { gje3DimX_107 = -1 };
enum { gje3DimX_108 = -1 };
enum { gje3DimX_109 = -1 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 0 };
enum { gje3Pad_03 = 0 };
enum { gje3Pad_04 = 1 };
enum { gje3Pad_05 = 0 };
enum { gje3Pad_06 = 1 };
enum { gje3Pad_07 = 0 };
enum { gje3Pad_08 = 1 };
enum { gje3Pad_09 = 2 };
enum { gje3Pad_10 = 1 };
enum { gje3Pad_11 = 0 };
enum { gje3Pad_12 = 1 };
enum { gje3Pad_13 = 0 };
enum { gje3Pad_14 = 1 };
enum { gje3Pad_15 = 0 };
enum { gje3Pad_16 = 1 };
enum { gje3Pad_17 = 0 };
enum { gje3Pad_18 = 1 };
enum { gje3Pad_19 = 0 };
enum { gje3Pad_20 = 1 };
enum { gje3Pad_21 = 0 };
enum { gje3Pad_22 = 0 };
enum { gje3Pad_23 = 0 };
enum { gje3Pad_24 = 1 };
enum { gje3Pad_25 = 0 };
enum { gje3Pad_26 = 0 };
enum { gje3Pad_27 = 0 };
enum { gje3Pad_28 = 1 };
enum { gje3Pad_29 = 0 };
enum { gje3Pad_30 = 0 };
enum { gje3Pad_31 = 0 };
enum { gje3Pad_32 = 0 };
enum { gje3Pad_33 = 0 };
enum { gje3Pad_34 = 0 };
enum { gje3Pad_35 = 0 };
enum { gje3Pad_36 = 0 };
enum { gje3Pad_37 = 0 };
enum { gje3Pad_38 = 0 };
enum { gje3Pad_39 = 0 };
enum { gje3Pad_40 = 0 };
enum { gje3Pad_41 = 0 };
enum { gje3Pad_42 = 0 };
enum { gje3Pad_43 = 0 };
enum { gje3Pad_44 = 0 };
enum { gje3Pad_45 = 0 };
enum { gje3Pad_46 = 0 };
enum { gje3Pad_47 = 0 };
enum { gje3Pad_48 = 0 };
enum { gje3Pad_49 = 0 };
enum { gje3Pad_50 = 0 };
enum { gje3Pad_51 = 0 };
enum { gje3Pad_52 = 0 };
enum { gje3Pad_53 = 0 };
enum { gje3Pad_54 = 0 };
enum { gje3Pad_55 = 0 };
enum { gje3Pad_56 = 0 };
enum { gje3Pad_57 = 0 };
enum { gje3Pad_58 = 0 };
enum { gje3Pad_59 = 0 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 0 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 0 };
enum { gje3Pad_64 = 0 };
enum { gje3Pad_65 = 0 };
enum { gje3Pad_66 = 0 };
enum { gje3Pad_67 = 0 };
enum { gje3Pad_68 = 0 };
enum { gje3Pad_69 = 0 };
enum { gje3Pad_70 = 0 };
enum { gje3Pad_71 = 0 };
enum { gje3Pad_72 = 0 };
enum { gje3Pad_73 = 0 };
enum { gje3Pad_74 = 0 };
enum { gje3Pad_75 = 0 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 0 };
enum { gje3Pad_79 = 0 };
enum { gje3Pad_80 = 0 };
enum { gje3Pad_81 = 0 };
enum { gje3Pad_82 = 0 };
enum { gje3Pad_83 = 0 };
enum { gje3Pad_84 = 0 };
enum { gje3Pad_85 = 0 };
enum { gje3Pad_86 = 0 };
enum { gje3Pad_87 = 0 };
enum { gje3Pad_88 = 0 };
enum { gje3Pad_89 = 0 };
enum { gje3Pad_90 = 0 };
enum { gje3Pad_91 = 0 };
enum { gje3Pad_92 = 0 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 0 };
enum { gje3Pad_95 = 0 };
enum { gje3Pad_96 = 0 };
enum { gje3Pad_97 = 0 };
enum { gje3Pad_98 = 0 };
enum { gje3Pad_99 = 0 };
enum { gje3Pad_100 = 0 };
enum { gje3Pad_101 = 0 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 0 };
enum { gje3Pad_104 = 0 };
enum { gje3Pad_105 = 0 };
enum { gje3Pad_106 = 0 };
enum { gje3Pad_107 = 0 };
enum { gje3Pad_108 = 0 };
enum { gje3Pad_109 = 0 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 1 };
enum { gje3SrchThrd_03 = 2 };
enum { gje3SrchThrd_04 = 2 };
enum { gje3SrchThrd_05 = 2 };
enum { gje3SrchThrd_06 = 2 };
enum { gje3SrchThrd_07 = 2 };
enum { gje3SrchThrd_08 = 3 };
enum { gje3SrchThrd_09 = 3 };
enum { gje3SrchThrd_10 = 3 };
enum { gje3SrchThrd_11 = 3 };
enum { gje3SrchThrd_12 = 3 };
enum { gje3SrchThrd_13 = 3 };
enum { gje3SrchThrd_14 = 3 };
enum { gje3SrchThrd_15 = 3 };
enum { gje3SrchThrd_16 = 3 };
enum { gje3SrchThrd_17 = 3 };
enum { gje3SrchThrd_18 = 3 };
enum { gje3SrchThrd_19 = 3 };
enum { gje3SrchThrd_20 = 4 };
enum { gje3SrchThrd_21 = 4 };
enum { gje3SrchThrd_22 = 4 };
enum { gje3SrchThrd_23 = 4 };
enum { gje3SrchThrd_24 = 4 };
enum { gje3SrchThrd_25 = 4 };
enum { gje3SrchThrd_26 = 4 };
enum { gje3SrchThrd_27 = 4 };
enum { gje3SrchThrd_28 = 4 };
enum { gje3SrchThrd_29 = 4 };
enum { gje3SrchThrd_30 = 4 };
enum { gje3SrchThrd_31 = 4 };
enum { gje3SrchThrd_32 = -1 };
enum { gje3SrchThrd_33 = -1 };
enum { gje3SrchThrd_34 = -1 };
enum { gje3SrchThrd_35 = -1 };
enum { gje3SrchThrd_36 = -1 };
enum { gje3SrchThrd_37 = -1 };
enum { gje3SrchThrd_38 = -1 };
enum { gje3SrchThrd_39 = -1 };
enum { gje3SrchThrd_40 = -1 };
enum { gje3SrchThrd_41 = -1 };
enum { gje3SrchThrd_42 = -1 };
enum { gje3SrchThrd_43 = -1 };
enum { gje3SrchThrd_44 = -1 };
enum { gje3SrchThrd_45 = -1 };
enum { gje3SrchThrd_46 = -1 };
enum { gje3SrchThrd_47 = -1 };
enum { gje3SrchThrd_48 = -1 };
enum { gje3SrchThrd_49 = -1 };
enum { gje3SrchThrd_50 = -1 };
enum { gje3SrchThrd_51 = -1 };
enum { gje3SrchThrd_52 = -1 };
enum { gje3SrchThrd_53 = -1 };
enum { gje3SrchThrd_54 = -1 };
enum { gje3SrchThrd_55 = -1 };
enum { gje3SrchThrd_56 = -1 };
enum { gje3SrchThrd_57 = -1 };
enum { gje3SrchThrd_58 = -1 };
enum { gje3SrchThrd_59 = -1 };
enum { gje3SrchThrd_60 = -1 };
enum { gje3SrchThrd_61 = -1 };
enum { gje3SrchThrd_62 = -1 };
enum { gje3SrchThrd_63 = -1 };
enum { gje3SrchThrd_64 = -1 };
enum { gje3SrchThrd_65 = -1 };
enum { gje3SrchThrd_66 = -1 };
enum { gje3SrchThrd_67 = -1 };
enum { gje3SrchThrd_68 = -1 };
enum { gje3SrchThrd_69 = -1 };
enum { gje3SrchThrd_70 = -1 };
enum { gje3SrchThrd_71 = -1 };
enum { gje3SrchThrd_72 = -1 };
enum { gje3SrchThrd_73 = -1 };
enum { gje3SrchThrd_74 = -1 };
enum { gje3SrchThrd_75 = -1 };
enum { gje3SrchThrd_76 = -1 };
enum { gje3SrchThrd_77 = -1 };
enum { gje3SrchThrd_78 = -1 };
enum { gje3SrchThrd_79 = -1 };
enum { gje3SrchThrd_80 = -1 };
enum { gje3SrchThrd_81 = -1 };
enum { gje3SrchThrd_82 = -1 };
enum { gje3SrchThrd_83 = -1 };
enum { gje3SrchThrd_84 = -1 };
enum { gje3SrchThrd_85 = -1 };
enum { gje3SrchThrd_86 = -1 };
enum { gje3SrchThrd_87 = -1 };
enum { gje3SrchThrd_88 = -1 };
enum { gje3SrchThrd_89 = -1 };
enum { gje3SrchThrd_90 = -1 };
enum { gje3SrchThrd_91 = -1 };
enum { gje3SrchThrd_92 = -1 };
enum { gje3SrchThrd_93 = -1 };
enum { gje3SrchThrd_94 = -1 };
enum { gje3SrchThrd_95 = -1 };
enum { gje3SrchThrd_96 = -1 };
enum { gje3SrchThrd_97 = -1 };
enum { gje3SrchThrd_98 = -1 };
enum { gje3SrchThrd_99 = -1 };
enum { gje3SrchThrd_100 = -1 };
enum { gje3SrchThrd_101 = -1 };
enum { gje3SrchThrd_102 = -1 };
enum { gje3SrchThrd_103 = -1 };
enum { gje3SrchThrd_104 = -1 };
enum { gje3SrchThrd_105 = -1 };
enum { gje3SrchThrd_106 = -1 };
enum { gje3SrchThrd_107 = -1 };
enum { gje3SrchThrd_108 = -1 };
enum { gje3SrchThrd_109 = -1 };
enum { matInv2x2MinBatch = 30000 };
enum { matInv3x3MinBatch = 15000 };
enum { matInv4x4MinBatch = 11000 };
enum { matInv5x5MinBatch = 6000 };
enum { matInv6x6MinBatch = 11000 };
enum { matInv7x7MinBatch = 17000 };
enum { matInv8x8MinBatch = 0x7fffffff };
enum { matInv9x9MinBatch = 0x7fffffff };
enum { matInv10x10MinBatch = 0x7fffffff };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 7 };
};
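/*
 * Editor's note (interpretation, not part of the original sources): the
 * config<T,arch> specializations above act as per-type, per-architecture
 * tuning tables indexed by matrix dimension NN. Read against the kernels and
 * macros below, gje3DimX_NN appears to be the x-dimension of the thread block
 * used by the general Gauss-Jordan kernel for NN x NN matrices (-1 marking an
 * unsupported size), gje3Pad_NN the shared-memory padding added to the leading
 * dimension (the "ofs" used by the As/AsInv macros), gje3SrchThrd_NN the
 * number of threads cooperating in the pivot search, and matInvNxNMinBatch the
 * batch size from which the dimension-specific matinv_NxN_matrix_per_thread
 * kernels below are preferred over the general kernel.
 */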
/* column-major */
#define As(row,col) As[(N+ofs)*(col)+(row)]
#define AsInv(row,col) AsInv[(N+ofs)*(col)+(row)]
#define Ainv(row,col) Ainv[(col)*N+(row)]
#define USE_PIVOTING 1
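/*
 * Editor's note: the three macros above implement column-major addressing.
 * As/AsInv index shared-memory tiles whose leading dimension is padded by
 * "ofs" (presumably to avoid bank conflicts), while Ainv indexes the unpadded
 * output matrix; for example, Ainv(1,0) expands to Ainv[0*N + 1], i.e. row 1
 * of column 0. USE_PIVOTING toggles the partial-pivoting blocks guarded by
 * "#if USE_PIVOTING" in the kernels below; building with it set to 0 drops the
 * pivot search and row swaps, which is faster but numerically safe only for
 * well-conditioned inputs such as diagonally dominant matrices.
 */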
template<typename T, int arch>
__global__ void matinv_2x2_matrix_per_thread (const T *A, T *Ainv, int batch)
{
const int blkNum = blockIdx.y * gridDim.x + blockIdx.x;
const int thrdNum = blkNum * blockDim.x + threadIdx.x;
const int N = 2;
int perm0, perm1;
int icol0, icol1;
T AA00, AA01;
T AA10, AA11;
T tmp;
#if USE_PIVOTING
typename config<T,arch>::absValType t;
typename config<T,arch>::absValType p;
int i, pvt;
#endif
A += thrdNum * N * N;
Ainv += thrdNum * N * N;
if (thrdNum < batch) {
AA00 = A[0];
AA10 = A[1];
AA01 = A[2];
AA11 = A[3];
perm0 = 0;
perm1 = 1;
/****************** iteration 0 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA00);
pvt = 0;
t = absOp (AA10);
if (t > p) { p = t; pvt = 1; }
/* swap pivot row with row 0 */
if (pvt == 1) {
tmp = AA00; AA00 = AA10; AA10 = tmp;
tmp = AA01; AA01 = AA11; AA11 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm1; perm1 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA00);
icol0 = perm0;
AA00 = tmp;
AA01 = mulOp (tmp, AA01);
/* eliminate above and below current row */
tmp = AA10;
AA10 = mulOp (negOp(tmp), AA00);
AA11 = fmnaOp (tmp, AA01, AA11);
/****************** iteration 1 ***********/
/* scale current row */
tmp = rcpOp (AA11);
icol1 = perm1;
AA10 = mulOp (tmp, AA10);
AA11 = tmp;
/* eliminate above and below current row */
tmp = AA01;
AA00 = fmnaOp (tmp, AA10, AA00);
AA01 = mulOp (negOp(tmp), AA11);
/* sort columns into the correct order */
Ainv(0,icol0) = AA00;
Ainv(1,icol0) = AA10;
Ainv(0,icol1) = AA01;
Ainv(1,icol1) = AA11;
}
}
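/*
 * Editor's note: a minimal host-side launch sketch, not part of the original
 * library. It assumes A_d and Ainv_d are device pointers to "batch" 2x2
 * column-major matrices stored back to back, that a config<double,ARCH_SM13>
 * specialization (and the double overloads of absOp/rcpOp/mulOp/negOp/fmnaOp)
 * exist earlier in this file, and uses an illustrative 128-thread block rather
 * than a tuned value. The 2D grid mirrors the blkNum computation inside the
 * kernel.
 */
static hipError_t invert_2x2_batched_sketch (const double *A_d, double *Ainv_d,
                                             int batch)
{
    if (batch <= 0) return hipSuccess;
    dim3 block (128);                         /* illustrative, not tuned */
    int nBlocks = (batch + block.x - 1) / block.x;
    dim3 grid;
    grid.x = (nBlocks < 65535) ? nBlocks : 65535;
    grid.y = (nBlocks + grid.x - 1) / grid.x; /* one thread per matrix */
    matinv_2x2_matrix_per_thread<double,ARCH_SM13><<<grid,block>>>(A_d, Ainv_d,
                                                                   batch);
    return hipGetLastError();
}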
template<typename T, int arch>
__global__ void matinv_3x3_matrix_per_thread (const T *A, T *Ainv, int batch)
{
const int blkNum = blockIdx.y * gridDim.x + blockIdx.x;
const int thrdNum = blkNum * blockDim.x + threadIdx.x;
const int N = 3;
int perm0, perm1, perm2;
int icol0, icol1, icol2;
T AA00, AA01, AA02;
T AA10, AA11, AA12;
T AA20, AA21, AA22;
T tmp;
#if USE_PIVOTING
typename config<T,arch>::absValType t;
typename config<T,arch>::absValType p;
int i, pvt;
#endif
A += thrdNum * N * N;
Ainv += thrdNum * N * N;
if (thrdNum < batch) {
AA00 = A[0];
AA10 = A[1];
AA20 = A[2];
AA01 = A[3];
AA11 = A[4];
AA21 = A[5];
AA02 = A[6];
AA12 = A[7];
AA22 = A[8];
perm0 = 0;
perm1 = 1;
perm2 = 2;
/****************** iteration 0 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA00);
pvt = 0;
t = absOp (AA10);
if (t > p) { p = t; pvt = 1; }
t = absOp (AA20);
if (t > p) { p = t; pvt = 2; }
/* swap pivot row with row 0 */
if (pvt == 1) {
tmp = AA00; AA00 = AA10; AA10 = tmp;
tmp = AA01; AA01 = AA11; AA11 = tmp;
tmp = AA02; AA02 = AA12; AA12 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm1; perm1 = i;
}
if (pvt == 2) {
tmp = AA00; AA00 = AA20; AA20 = tmp;
tmp = AA01; AA01 = AA21; AA21 = tmp;
tmp = AA02; AA02 = AA22; AA22 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm2; perm2 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA00);
icol0 = perm0;
AA00 = tmp;
AA01 = mulOp (tmp, AA01);
AA02 = mulOp (tmp, AA02);
/* eliminate above and below current row */
tmp = AA10;
AA10 = mulOp (negOp(tmp), AA00);
AA11 = fmnaOp (tmp, AA01, AA11);
AA12 = fmnaOp (tmp, AA02, AA12);
tmp = AA20;
AA20 = mulOp (negOp(tmp), AA00);
AA21 = fmnaOp (tmp, AA01, AA21);
AA22 = fmnaOp (tmp, AA02, AA22);
/****************** iteration 1 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA11);
pvt = 1;
t = absOp (AA21);
if (t > p) { p = t; pvt = 2; }
/* swap pivot row with row 1 */
if (pvt == 2) {
tmp = AA10; AA10 = AA20; AA20 = tmp;
tmp = AA11; AA11 = AA21; AA21 = tmp;
tmp = AA12; AA12 = AA22; AA22 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm2; perm2 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA11);
icol1 = perm1;
AA10 = mulOp (tmp, AA10);
AA11 = tmp;
AA12 = mulOp (tmp, AA12);
/* eliminate above and below current row */
tmp = AA01;
AA00 = fmnaOp (tmp, AA10, AA00);
AA01 = mulOp (negOp(tmp), AA11);
AA02 = fmnaOp (tmp, AA12, AA02);
tmp = AA21;
AA20 = fmnaOp (tmp, AA10, AA20);
AA21 = mulOp (negOp(tmp), AA11);
AA22 = fmnaOp (tmp, AA12, AA22);
/****************** iteration 2 ****************/
/* scale current row */
tmp = rcpOp (AA22);
icol2 = perm2;
AA20 = mulOp (tmp, AA20);
AA21 = mulOp (tmp, AA21);
AA22 = tmp;
/* eliminate above and below current row */
tmp = AA02;
AA00 = fmnaOp (tmp, AA20, AA00);
AA01 = fmnaOp (tmp, AA21, AA01);
AA02 = mulOp (negOp(tmp), AA22);
tmp = AA12;
AA10 = fmnaOp (tmp, AA20, AA10);
AA11 = fmnaOp (tmp, AA21, AA11);
AA12 = mulOp (negOp(tmp), AA22);
/* sort columns into the correct order */
Ainv(0,icol0) = AA00;
Ainv(1,icol0) = AA10;
Ainv(2,icol0) = AA20;
Ainv(0,icol1) = AA01;
Ainv(1,icol1) = AA11;
Ainv(2,icol1) = AA21;
Ainv(0,icol2) = AA02;
Ainv(1,icol2) = AA12;
Ainv(2,icol2) = AA22;
}
}
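/*
 * Editor's note: the 2x2 and 3x3 kernels above show the pattern every
 * dimension follows. Each elimination step (when USE_PIVOTING is set) scans
 * the remaining rows of the current column for the largest-magnitude pivot,
 * swaps that row into place and records the swap in perm*, then scales the
 * pivot row by the reciprocal of the pivot and eliminates the column from all
 * other rows via the fused multiply-negate-add fmnaOp. The last iteration
 * needs no pivot search because only one candidate row remains.
 */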
template<typename T, int arch>
__global__ void matinv_4x4_matrix_per_thread (const T *A, T *Ainv, int batch)
{
const int blkNum = blockIdx.y * gridDim.x + blockIdx.x;
const int thrdNum = blkNum * blockDim.x + threadIdx.x;
const int N = 4;
int perm0, perm1, perm2, perm3;
int icol0, icol1, icol2, icol3;
T AA00, AA01, AA02, AA03;
T AA10, AA11, AA12, AA13;
T AA20, AA21, AA22, AA23;
T AA30, AA31, AA32, AA33;
T tmp;
#if USE_PIVOTING
typename config<T,arch>::absValType t;
typename config<T,arch>::absValType p;
int i, pvt;
#endif
A += thrdNum * N * N;
Ainv += thrdNum * N * N;
if (thrdNum < batch) {
AA00 = A[0];
AA10 = A[1];
AA20 = A[2];
AA30 = A[3];
AA01 = A[4];
AA11 = A[5];
AA21 = A[6];
AA31 = A[7];
AA02 = A[8];
AA12 = A[9];
AA22 = A[10];
AA32 = A[11];
AA03 = A[12];
AA13 = A[13];
AA23 = A[14];
AA33 = A[15];
perm0 = 0;
perm1 = 1;
perm2 = 2;
perm3 = 3;
/****************** iteration 0 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA00);
pvt = 0;
t = absOp (AA10);
if (t > p) { p = t; pvt = 1; }
t = absOp (AA20);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA30);
if (t > p) { p = t; pvt = 3; }
/* swap pivot row with row 0 */
if (pvt == 1) {
tmp = AA00; AA00 = AA10; AA10 = tmp;
tmp = AA01; AA01 = AA11; AA11 = tmp;
tmp = AA02; AA02 = AA12; AA12 = tmp;
tmp = AA03; AA03 = AA13; AA13 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm1; perm1 = i;
}
if (pvt == 2) {
tmp = AA00; AA00 = AA20; AA20 = tmp;
tmp = AA01; AA01 = AA21; AA21 = tmp;
tmp = AA02; AA02 = AA22; AA22 = tmp;
tmp = AA03; AA03 = AA23; AA23 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA00; AA00 = AA30; AA30 = tmp;
tmp = AA01; AA01 = AA31; AA31 = tmp;
tmp = AA02; AA02 = AA32; AA32 = tmp;
tmp = AA03; AA03 = AA33; AA33 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm3; perm3 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA00);
icol0 = perm0;
AA00 = tmp;
AA01 = mulOp (tmp, AA01);
AA02 = mulOp (tmp, AA02);
AA03 = mulOp (tmp, AA03);
/* eliminate above and below current row */
tmp = AA10;
AA10 = mulOp (negOp(tmp), AA00);
AA11 = fmnaOp (tmp, AA01, AA11);
AA12 = fmnaOp (tmp, AA02, AA12);
AA13 = fmnaOp (tmp, AA03, AA13);
tmp = AA20;
AA20 = mulOp (negOp(tmp), AA00);
AA21 = fmnaOp (tmp, AA01, AA21);
AA22 = fmnaOp (tmp, AA02, AA22);
AA23 = fmnaOp (tmp, AA03, AA23);
tmp = AA30;
AA30 = mulOp (negOp(tmp), AA00);
AA31 = fmnaOp (tmp, AA01, AA31);
AA32 = fmnaOp (tmp, AA02, AA32);
AA33 = fmnaOp (tmp, AA03, AA33);
/****************** iteration 1 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA11);
pvt = 1;
t = absOp (AA21);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA31);
if (t > p) { p = t; pvt = 3; }
/* swap pivot row with row 1 */
if (pvt == 2) {
tmp = AA10; AA10 = AA20; AA20 = tmp;
tmp = AA11; AA11 = AA21; AA21 = tmp;
tmp = AA12; AA12 = AA22; AA22 = tmp;
tmp = AA13; AA13 = AA23; AA23 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA10; AA10 = AA30; AA30 = tmp;
tmp = AA11; AA11 = AA31; AA31 = tmp;
tmp = AA12; AA12 = AA32; AA32 = tmp;
tmp = AA13; AA13 = AA33; AA33 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm3; perm3 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA11);
icol1 = perm1;
AA10 = mulOp (tmp, AA10);
AA11 = tmp;
AA12 = mulOp (tmp, AA12);
AA13 = mulOp (tmp, AA13);
/* eliminate above and below current row */
tmp = AA01;
AA00 = fmnaOp (tmp, AA10, AA00);
AA01 = mulOp (negOp(tmp), AA11);
AA02 = fmnaOp (tmp, AA12, AA02);
AA03 = fmnaOp (tmp, AA13, AA03);
tmp = AA21;
AA20 = fmnaOp (tmp, AA10, AA20);
AA21 = mulOp (negOp(tmp), AA11);
AA22 = fmnaOp (tmp, AA12, AA22);
AA23 = fmnaOp (tmp, AA13, AA23);
tmp = AA31;
AA30 = fmnaOp (tmp, AA10, AA30);
AA31 = mulOp (negOp(tmp), AA11);
AA32 = fmnaOp (tmp, AA12, AA32);
AA33 = fmnaOp (tmp, AA13, AA33);
/****************** iteration 2 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA22);
pvt = 2;
t = absOp (AA32);
if (t > p) { p = t; pvt = 3; }
/* swap pivot row with row 2 */
if (pvt == 3) {
tmp = AA20; AA20 = AA30; AA30 = tmp;
tmp = AA21; AA21 = AA31; AA31 = tmp;
tmp = AA22; AA22 = AA32; AA32 = tmp;
tmp = AA23; AA23 = AA33; AA33 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm3; perm3 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA22);
icol2 = perm2;
AA20 = mulOp (tmp, AA20);
AA21 = mulOp (tmp, AA21);
AA22 = tmp;
AA23 = mulOp (tmp, AA23);
/* eliminate above and below current row */
tmp = AA02;
AA00 = fmnaOp (tmp, AA20, AA00);
AA01 = fmnaOp (tmp, AA21, AA01);
AA02 = mulOp (negOp(tmp), AA22);
AA03 = fmnaOp (tmp, AA23, AA03);
tmp = AA12;
AA10 = fmnaOp (tmp, AA20, AA10);
AA11 = fmnaOp (tmp, AA21, AA11);
AA12 = mulOp (negOp(tmp), AA22);
AA13 = fmnaOp (tmp, AA23, AA13);
tmp = AA32;
AA30 = fmnaOp (tmp, AA20, AA30);
AA31 = fmnaOp (tmp, AA21, AA31);
AA32 = mulOp (negOp(tmp), AA22);
AA33 = fmnaOp (tmp, AA23, AA33);
/****************** iteration 3 ****************/
/* scale current row */
tmp = rcpOp (AA33);
icol3 = perm3;
AA30 = mulOp (tmp, AA30);
AA31 = mulOp (tmp, AA31);
AA32 = mulOp (tmp, AA32);
AA33 = tmp;
/* eliminate above and below current row */
tmp = AA03;
AA00 = fmnaOp (tmp, AA30, AA00);
AA01 = fmnaOp (tmp, AA31, AA01);
AA02 = fmnaOp (tmp, AA32, AA02);
AA03 = mulOp (negOp(tmp), AA33);
tmp = AA13;
AA10 = fmnaOp (tmp, AA30, AA10);
AA11 = fmnaOp (tmp, AA31, AA11);
AA12 = fmnaOp (tmp, AA32, AA12);
AA13 = mulOp (negOp(tmp), AA33);
tmp = AA23;
AA20 = fmnaOp (tmp, AA30, AA20);
AA21 = fmnaOp (tmp, AA31, AA21);
AA22 = fmnaOp (tmp, AA32, AA22);
AA23 = mulOp (negOp(tmp), AA33);
/* sort columns into the correct order */
Ainv(0,icol0) = AA00;
Ainv(1,icol0) = AA10;
Ainv(2,icol0) = AA20;
Ainv(3,icol0) = AA30;
Ainv(0,icol1) = AA01;
Ainv(1,icol1) = AA11;
Ainv(2,icol1) = AA21;
Ainv(3,icol1) = AA31;
Ainv(0,icol2) = AA02;
Ainv(1,icol2) = AA12;
Ainv(2,icol2) = AA22;
Ainv(3,icol2) = AA32;
Ainv(0,icol3) = AA03;
Ainv(1,icol3) = AA13;
Ainv(2,icol3) = AA23;
Ainv(3,icol3) = AA33;
}
}
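/*
 * Editor's note: the "sort columns into the correct order" stores at the end
 * of each kernel undo the pivoting. Row interchanges applied to A during
 * elimination show up as column interchanges in the computed inverse, so
 * column j of the register result is column perm_j of A^-1 and is therefore
 * written to Ainv(:,icol_j) rather than to column j directly.
 */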
template<typename T, int arch>
__global__ void matinv_5x5_matrix_per_thread (const T *A, T *Ainv, int batch)
{
const int blkNum = blockIdx.y * gridDim.x + blockIdx.x;
const int thrdNum = blkNum * blockDim.x + threadIdx.x;
const int N = 5;
int perm0, perm1, perm2, perm3, perm4;
int icol0, icol1, icol2, icol3, icol4;
T AA00, AA01, AA02, AA03, AA04;
T AA10, AA11, AA12, AA13, AA14;
T AA20, AA21, AA22, AA23, AA24;
T AA30, AA31, AA32, AA33, AA34;
T AA40, AA41, AA42, AA43, AA44;
T tmp;
#if USE_PIVOTING
typename config<T,arch>::absValType t;
typename config<T,arch>::absValType p;
int i, pvt;
#endif
A += thrdNum * N * N;
Ainv += thrdNum * N * N;
if (thrdNum < batch) {
AA00 = A[0];
AA10 = A[1];
AA20 = A[2];
AA30 = A[3];
AA40 = A[4];
AA01 = A[5];
AA11 = A[6];
AA21 = A[7];
AA31 = A[8];
AA41 = A[9];
AA02 = A[10];
AA12 = A[11];
AA22 = A[12];
AA32 = A[13];
AA42 = A[14];
AA03 = A[15];
AA13 = A[16];
AA23 = A[17];
AA33 = A[18];
AA43 = A[19];
AA04 = A[20];
AA14 = A[21];
AA24 = A[22];
AA34 = A[23];
AA44 = A[24];
perm0 = 0;
perm1 = 1;
perm2 = 2;
perm3 = 3;
perm4 = 4;
/****************** iteration 0 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA00);
pvt = 0;
t = absOp (AA10);
if (t > p) { p = t; pvt = 1; }
t = absOp (AA20);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA30);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA40);
if (t > p) { p = t; pvt = 4; }
/* swap pivot row with row 0 */
if (pvt == 1) {
tmp = AA00; AA00 = AA10; AA10 = tmp;
tmp = AA01; AA01 = AA11; AA11 = tmp;
tmp = AA02; AA02 = AA12; AA12 = tmp;
tmp = AA03; AA03 = AA13; AA13 = tmp;
tmp = AA04; AA04 = AA14; AA14 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm1; perm1 = i;
}
if (pvt == 2) {
tmp = AA00; AA00 = AA20; AA20 = tmp;
tmp = AA01; AA01 = AA21; AA21 = tmp;
tmp = AA02; AA02 = AA22; AA22 = tmp;
tmp = AA03; AA03 = AA23; AA23 = tmp;
tmp = AA04; AA04 = AA24; AA24 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA00; AA00 = AA30; AA30 = tmp;
tmp = AA01; AA01 = AA31; AA31 = tmp;
tmp = AA02; AA02 = AA32; AA32 = tmp;
tmp = AA03; AA03 = AA33; AA33 = tmp;
tmp = AA04; AA04 = AA34; AA34 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA00; AA00 = AA40; AA40 = tmp;
tmp = AA01; AA01 = AA41; AA41 = tmp;
tmp = AA02; AA02 = AA42; AA42 = tmp;
tmp = AA03; AA03 = AA43; AA43 = tmp;
tmp = AA04; AA04 = AA44; AA44 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm4; perm4 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA00);
icol0 = perm0;
AA00 = tmp;
AA01 = mulOp (tmp, AA01);
AA02 = mulOp (tmp, AA02);
AA03 = mulOp (tmp, AA03);
AA04 = mulOp (tmp, AA04);
/* eliminate above and below current row */
tmp = AA10;
AA10 = mulOp (negOp(tmp), AA00);
AA11 = fmnaOp (tmp, AA01, AA11);
AA12 = fmnaOp (tmp, AA02, AA12);
AA13 = fmnaOp (tmp, AA03, AA13);
AA14 = fmnaOp (tmp, AA04, AA14);
tmp = AA20;
AA20 = mulOp (negOp(tmp), AA00);
AA21 = fmnaOp (tmp, AA01, AA21);
AA22 = fmnaOp (tmp, AA02, AA22);
AA23 = fmnaOp (tmp, AA03, AA23);
AA24 = fmnaOp (tmp, AA04, AA24);
tmp = AA30;
AA30 = mulOp (negOp(tmp), AA00);
AA31 = fmnaOp (tmp, AA01, AA31);
AA32 = fmnaOp (tmp, AA02, AA32);
AA33 = fmnaOp (tmp, AA03, AA33);
AA34 = fmnaOp (tmp, AA04, AA34);
tmp = AA40;
AA40 = mulOp (negOp(tmp), AA00);
AA41 = fmnaOp (tmp, AA01, AA41);
AA42 = fmnaOp (tmp, AA02, AA42);
AA43 = fmnaOp (tmp, AA03, AA43);
AA44 = fmnaOp (tmp, AA04, AA44);
/****************** iteration 1 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA11);
pvt = 1;
t = absOp (AA21);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA31);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA41);
if (t > p) { p = t; pvt = 4; }
/* swap pivot row with row 1 */
if (pvt == 2) {
tmp = AA10; AA10 = AA20; AA20 = tmp;
tmp = AA11; AA11 = AA21; AA21 = tmp;
tmp = AA12; AA12 = AA22; AA22 = tmp;
tmp = AA13; AA13 = AA23; AA23 = tmp;
tmp = AA14; AA14 = AA24; AA24 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA10; AA10 = AA30; AA30 = tmp;
tmp = AA11; AA11 = AA31; AA31 = tmp;
tmp = AA12; AA12 = AA32; AA32 = tmp;
tmp = AA13; AA13 = AA33; AA33 = tmp;
tmp = AA14; AA14 = AA34; AA34 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA10; AA10 = AA40; AA40 = tmp;
tmp = AA11; AA11 = AA41; AA41 = tmp;
tmp = AA12; AA12 = AA42; AA42 = tmp;
tmp = AA13; AA13 = AA43; AA43 = tmp;
tmp = AA14; AA14 = AA44; AA44 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm4; perm4 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA11);
icol1 = perm1;
AA10 = mulOp (tmp, AA10);
AA11 = tmp;
AA12 = mulOp (tmp, AA12);
AA13 = mulOp (tmp, AA13);
AA14 = mulOp (tmp, AA14);
/* eliminate above and below current row */
tmp = AA01;
AA00 = fmnaOp (tmp, AA10, AA00);
AA01 = mulOp (negOp(tmp), AA11);
AA02 = fmnaOp (tmp, AA12, AA02);
AA03 = fmnaOp (tmp, AA13, AA03);
AA04 = fmnaOp (tmp, AA14, AA04);
tmp = AA21;
AA20 = fmnaOp (tmp, AA10, AA20);
AA21 = mulOp (negOp(tmp), AA11);
AA22 = fmnaOp (tmp, AA12, AA22);
AA23 = fmnaOp (tmp, AA13, AA23);
AA24 = fmnaOp (tmp, AA14, AA24);
tmp = AA31;
AA30 = fmnaOp (tmp, AA10, AA30);
AA31 = mulOp (negOp(tmp), AA11);
AA32 = fmnaOp (tmp, AA12, AA32);
AA33 = fmnaOp (tmp, AA13, AA33);
AA34 = fmnaOp (tmp, AA14, AA34);
tmp = AA41;
AA40 = fmnaOp (tmp, AA10, AA40);
AA41 = mulOp (negOp(tmp), AA11);
AA42 = fmnaOp (tmp, AA12, AA42);
AA43 = fmnaOp (tmp, AA13, AA43);
AA44 = fmnaOp (tmp, AA14, AA44);
/****************** iteration 2 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA22);
pvt = 2;
t = absOp (AA32);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA42);
if (t > p) { p = t; pvt = 4; }
/* swap pivot row with row 2 */
if (pvt == 3) {
tmp = AA20; AA20 = AA30; AA30 = tmp;
tmp = AA21; AA21 = AA31; AA31 = tmp;
tmp = AA22; AA22 = AA32; AA32 = tmp;
tmp = AA23; AA23 = AA33; AA33 = tmp;
tmp = AA24; AA24 = AA34; AA34 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA20; AA20 = AA40; AA40 = tmp;
tmp = AA21; AA21 = AA41; AA41 = tmp;
tmp = AA22; AA22 = AA42; AA42 = tmp;
tmp = AA23; AA23 = AA43; AA43 = tmp;
tmp = AA24; AA24 = AA44; AA44 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm4; perm4 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA22);
icol2 = perm2;
AA20 = mulOp (tmp, AA20);
AA21 = mulOp (tmp, AA21);
AA22 = tmp;
AA23 = mulOp (tmp, AA23);
AA24 = mulOp (tmp, AA24);
/* eliminate above and below current row */
tmp = AA02;
AA00 = fmnaOp (tmp, AA20, AA00);
AA01 = fmnaOp (tmp, AA21, AA01);
AA02 = mulOp (negOp(tmp), AA22);
AA03 = fmnaOp (tmp, AA23, AA03);
AA04 = fmnaOp (tmp, AA24, AA04);
tmp = AA12;
AA10 = fmnaOp (tmp, AA20, AA10);
AA11 = fmnaOp (tmp, AA21, AA11);
AA12 = mulOp (negOp(tmp), AA22);
AA13 = fmnaOp (tmp, AA23, AA13);
AA14 = fmnaOp (tmp, AA24, AA14);
tmp = AA32;
AA30 = fmnaOp (tmp, AA20, AA30);
AA31 = fmnaOp (tmp, AA21, AA31);
AA32 = mulOp (negOp(tmp), AA22);
AA33 = fmnaOp (tmp, AA23, AA33);
AA34 = fmnaOp (tmp, AA24, AA34);
tmp = AA42;
AA40 = fmnaOp (tmp, AA20, AA40);
AA41 = fmnaOp (tmp, AA21, AA41);
AA42 = mulOp (negOp(tmp), AA22);
AA43 = fmnaOp (tmp, AA23, AA43);
AA44 = fmnaOp (tmp, AA24, AA44);
/****************** iteration 3 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA33);
pvt = 3;
t = absOp (AA43);
if (t > p) { p = t; pvt = 4; }
/* swap pivot row with row 3 */
if (pvt == 4) {
tmp = AA30; AA30 = AA40; AA40 = tmp;
tmp = AA31; AA31 = AA41; AA41 = tmp;
tmp = AA32; AA32 = AA42; AA42 = tmp;
tmp = AA33; AA33 = AA43; AA43 = tmp;
tmp = AA34; AA34 = AA44; AA44 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm4; perm4 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA33);
icol3 = perm3;
AA30 = mulOp (tmp, AA30);
AA31 = mulOp (tmp, AA31);
AA32 = mulOp (tmp, AA32);
AA33 = tmp;
AA34 = mulOp (tmp, AA34);
/* eliminate above and below current row */
tmp = AA03;
AA00 = fmnaOp (tmp, AA30, AA00);
AA01 = fmnaOp (tmp, AA31, AA01);
AA02 = fmnaOp (tmp, AA32, AA02);
AA03 = mulOp (negOp(tmp), AA33);
AA04 = fmnaOp (tmp, AA34, AA04);
tmp = AA13;
AA10 = fmnaOp (tmp, AA30, AA10);
AA11 = fmnaOp (tmp, AA31, AA11);
AA12 = fmnaOp (tmp, AA32, AA12);
AA13 = mulOp (negOp(tmp), AA33);
AA14 = fmnaOp (tmp, AA34, AA14);
tmp = AA23;
AA20 = fmnaOp (tmp, AA30, AA20);
AA21 = fmnaOp (tmp, AA31, AA21);
AA22 = fmnaOp (tmp, AA32, AA22);
AA23 = mulOp (negOp(tmp), AA33);
AA24 = fmnaOp (tmp, AA34, AA24);
tmp = AA43;
AA40 = fmnaOp (tmp, AA30, AA40);
AA41 = fmnaOp (tmp, AA31, AA41);
AA42 = fmnaOp (tmp, AA32, AA42);
AA43 = mulOp (negOp(tmp), AA33);
AA44 = fmnaOp (tmp, AA34, AA44);
/****************** iteration 4 ****************/
/* scale current row */
tmp = rcpOp (AA44);
icol4 = perm4;
AA40 = mulOp (tmp, AA40);
AA41 = mulOp (tmp, AA41);
AA42 = mulOp (tmp, AA42);
AA43 = mulOp (tmp, AA43);
AA44 = tmp;
/* eliminate above and below current row */
tmp = AA04;
AA00 = fmnaOp (tmp, AA40, AA00);
AA01 = fmnaOp (tmp, AA41, AA01);
AA02 = fmnaOp (tmp, AA42, AA02);
AA03 = fmnaOp (tmp, AA43, AA03);
AA04 = mulOp (negOp(tmp), AA44);
tmp = AA14;
AA10 = fmnaOp (tmp, AA40, AA10);
AA11 = fmnaOp (tmp, AA41, AA11);
AA12 = fmnaOp (tmp, AA42, AA12);
AA13 = fmnaOp (tmp, AA43, AA13);
AA14 = mulOp (negOp(tmp), AA44);
tmp = AA24;
AA20 = fmnaOp (tmp, AA40, AA20);
AA21 = fmnaOp (tmp, AA41, AA21);
AA22 = fmnaOp (tmp, AA42, AA22);
AA23 = fmnaOp (tmp, AA43, AA23);
AA24 = mulOp (negOp(tmp), AA44);
tmp = AA34;
AA30 = fmnaOp (tmp, AA40, AA30);
AA31 = fmnaOp (tmp, AA41, AA31);
AA32 = fmnaOp (tmp, AA42, AA32);
AA33 = fmnaOp (tmp, AA43, AA33);
AA34 = mulOp (negOp(tmp), AA44);
/* sort columns into the correct order */
Ainv(0,icol0) = AA00;
Ainv(1,icol0) = AA10;
Ainv(2,icol0) = AA20;
Ainv(3,icol0) = AA30;
Ainv(4,icol0) = AA40;
Ainv(0,icol1) = AA01;
Ainv(1,icol1) = AA11;
Ainv(2,icol1) = AA21;
Ainv(3,icol1) = AA31;
Ainv(4,icol1) = AA41;
Ainv(0,icol2) = AA02;
Ainv(1,icol2) = AA12;
Ainv(2,icol2) = AA22;
Ainv(3,icol2) = AA32;
Ainv(4,icol2) = AA42;
Ainv(0,icol3) = AA03;
Ainv(1,icol3) = AA13;
Ainv(2,icol3) = AA23;
Ainv(3,icol3) = AA33;
Ainv(4,icol3) = AA43;
Ainv(0,icol4) = AA04;
Ainv(1,icol4) = AA14;
Ainv(2,icol4) = AA24;
Ainv(3,icol4) = AA34;
Ainv(4,icol4) = AA44;
}
}
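/*
 * Editor's note: the identity matrix is never materialised; the inverse is
 * built in place in the same AAxy registers. Storing the pivot's reciprocal at
 * the pivot position and -tmp*AAxy at the eliminated positions is the standard
 * in-place Gauss-Jordan update that folds the augmented identity into the
 * working matrix.
 */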
template<typename T, int arch>
__global__ void matinv_6x6_matrix_per_thread (const T *A, T *Ainv, int batch)
{
const int blkNum = blockIdx.y * gridDim.x + blockIdx.x;
const int thrdNum = blkNum * blockDim.x + threadIdx.x;
const int N = 6;
int perm0, perm1, perm2, perm3, perm4, perm5;
int icol0, icol1, icol2, icol3, icol4, icol5;
T AA00, AA01, AA02, AA03, AA04, AA05;
T AA10, AA11, AA12, AA13, AA14, AA15;
T AA20, AA21, AA22, AA23, AA24, AA25;
T AA30, AA31, AA32, AA33, AA34, AA35;
T AA40, AA41, AA42, AA43, AA44, AA45;
T AA50, AA51, AA52, AA53, AA54, AA55;
T tmp;
#if USE_PIVOTING
typename config<T,arch>::absValType t;
typename config<T,arch>::absValType p;
int i, pvt;
#endif
A += thrdNum * N * N;
Ainv += thrdNum * N * N;
if (thrdNum < batch) {
AA00 = A[0];
AA10 = A[1];
AA20 = A[2];
AA30 = A[3];
AA40 = A[4];
AA50 = A[5];
AA01 = A[6];
AA11 = A[7];
AA21 = A[8];
AA31 = A[9];
AA41 = A[10];
AA51 = A[11];
AA02 = A[12];
AA12 = A[13];
AA22 = A[14];
AA32 = A[15];
AA42 = A[16];
AA52 = A[17];
AA03 = A[18];
AA13 = A[19];
AA23 = A[20];
AA33 = A[21];
AA43 = A[22];
AA53 = A[23];
AA04 = A[24];
AA14 = A[25];
AA24 = A[26];
AA34 = A[27];
AA44 = A[28];
AA54 = A[29];
AA05 = A[30];
AA15 = A[31];
AA25 = A[32];
AA35 = A[33];
AA45 = A[34];
AA55 = A[35];
perm0 = 0;
perm1 = 1;
perm2 = 2;
perm3 = 3;
perm4 = 4;
perm5 = 5;
/****************** iteration 0 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA00);
pvt = 0;
t = absOp (AA10);
if (t > p) { p = t; pvt = 1; }
t = absOp (AA20);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA30);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA40);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA50);
if (t > p) { p = t; pvt = 5; }
/* swap pivot row with row 0 */
if (pvt == 1) {
tmp = AA00; AA00 = AA10; AA10 = tmp;
tmp = AA01; AA01 = AA11; AA11 = tmp;
tmp = AA02; AA02 = AA12; AA12 = tmp;
tmp = AA03; AA03 = AA13; AA13 = tmp;
tmp = AA04; AA04 = AA14; AA14 = tmp;
tmp = AA05; AA05 = AA15; AA15 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm1; perm1 = i;
}
if (pvt == 2) {
tmp = AA00; AA00 = AA20; AA20 = tmp;
tmp = AA01; AA01 = AA21; AA21 = tmp;
tmp = AA02; AA02 = AA22; AA22 = tmp;
tmp = AA03; AA03 = AA23; AA23 = tmp;
tmp = AA04; AA04 = AA24; AA24 = tmp;
tmp = AA05; AA05 = AA25; AA25 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA00; AA00 = AA30; AA30 = tmp;
tmp = AA01; AA01 = AA31; AA31 = tmp;
tmp = AA02; AA02 = AA32; AA32 = tmp;
tmp = AA03; AA03 = AA33; AA33 = tmp;
tmp = AA04; AA04 = AA34; AA34 = tmp;
tmp = AA05; AA05 = AA35; AA35 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA00; AA00 = AA40; AA40 = tmp;
tmp = AA01; AA01 = AA41; AA41 = tmp;
tmp = AA02; AA02 = AA42; AA42 = tmp;
tmp = AA03; AA03 = AA43; AA43 = tmp;
tmp = AA04; AA04 = AA44; AA44 = tmp;
tmp = AA05; AA05 = AA45; AA45 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA00; AA00 = AA50; AA50 = tmp;
tmp = AA01; AA01 = AA51; AA51 = tmp;
tmp = AA02; AA02 = AA52; AA52 = tmp;
tmp = AA03; AA03 = AA53; AA53 = tmp;
tmp = AA04; AA04 = AA54; AA54 = tmp;
tmp = AA05; AA05 = AA55; AA55 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm5; perm5 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA00);
icol0 = perm0;
AA00 = tmp;
AA01 = mulOp (tmp, AA01);
AA02 = mulOp (tmp, AA02);
AA03 = mulOp (tmp, AA03);
AA04 = mulOp (tmp, AA04);
AA05 = mulOp (tmp, AA05);
/* eliminate above and below current row */
tmp = AA10;
AA10 = mulOp (negOp(tmp), AA00);
AA11 = fmnaOp (tmp, AA01, AA11);
AA12 = fmnaOp (tmp, AA02, AA12);
AA13 = fmnaOp (tmp, AA03, AA13);
AA14 = fmnaOp (tmp, AA04, AA14);
AA15 = fmnaOp (tmp, AA05, AA15);
tmp = AA20;
AA20 = mulOp (negOp(tmp), AA00);
AA21 = fmnaOp (tmp, AA01, AA21);
AA22 = fmnaOp (tmp, AA02, AA22);
AA23 = fmnaOp (tmp, AA03, AA23);
AA24 = fmnaOp (tmp, AA04, AA24);
AA25 = fmnaOp (tmp, AA05, AA25);
tmp = AA30;
AA30 = mulOp (negOp(tmp), AA00);
AA31 = fmnaOp (tmp, AA01, AA31);
AA32 = fmnaOp (tmp, AA02, AA32);
AA33 = fmnaOp (tmp, AA03, AA33);
AA34 = fmnaOp (tmp, AA04, AA34);
AA35 = fmnaOp (tmp, AA05, AA35);
tmp = AA40;
AA40 = mulOp (negOp(tmp), AA00);
AA41 = fmnaOp (tmp, AA01, AA41);
AA42 = fmnaOp (tmp, AA02, AA42);
AA43 = fmnaOp (tmp, AA03, AA43);
AA44 = fmnaOp (tmp, AA04, AA44);
AA45 = fmnaOp (tmp, AA05, AA45);
tmp = AA50;
AA50 = mulOp (negOp(tmp), AA00);
AA51 = fmnaOp (tmp, AA01, AA51);
AA52 = fmnaOp (tmp, AA02, AA52);
AA53 = fmnaOp (tmp, AA03, AA53);
AA54 = fmnaOp (tmp, AA04, AA54);
AA55 = fmnaOp (tmp, AA05, AA55);
/****************** iteration 1 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA11);
pvt = 1;
t = absOp (AA21);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA31);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA41);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA51);
if (t > p) { p = t; pvt = 5; }
/* swap pivot row with row 1 */
if (pvt == 2) {
tmp = AA10; AA10 = AA20; AA20 = tmp;
tmp = AA11; AA11 = AA21; AA21 = tmp;
tmp = AA12; AA12 = AA22; AA22 = tmp;
tmp = AA13; AA13 = AA23; AA23 = tmp;
tmp = AA14; AA14 = AA24; AA24 = tmp;
tmp = AA15; AA15 = AA25; AA25 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA10; AA10 = AA30; AA30 = tmp;
tmp = AA11; AA11 = AA31; AA31 = tmp;
tmp = AA12; AA12 = AA32; AA32 = tmp;
tmp = AA13; AA13 = AA33; AA33 = tmp;
tmp = AA14; AA14 = AA34; AA34 = tmp;
tmp = AA15; AA15 = AA35; AA35 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA10; AA10 = AA40; AA40 = tmp;
tmp = AA11; AA11 = AA41; AA41 = tmp;
tmp = AA12; AA12 = AA42; AA42 = tmp;
tmp = AA13; AA13 = AA43; AA43 = tmp;
tmp = AA14; AA14 = AA44; AA44 = tmp;
tmp = AA15; AA15 = AA45; AA45 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA10; AA10 = AA50; AA50 = tmp;
tmp = AA11; AA11 = AA51; AA51 = tmp;
tmp = AA12; AA12 = AA52; AA52 = tmp;
tmp = AA13; AA13 = AA53; AA53 = tmp;
tmp = AA14; AA14 = AA54; AA54 = tmp;
tmp = AA15; AA15 = AA55; AA55 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm5; perm5 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA11);
icol1 = perm1;
AA10 = mulOp (tmp, AA10);
AA11 = tmp;
AA12 = mulOp (tmp, AA12);
AA13 = mulOp (tmp, AA13);
AA14 = mulOp (tmp, AA14);
AA15 = mulOp (tmp, AA15);
/* eliminate above and below current row */
tmp = AA01;
AA00 = fmnaOp (tmp, AA10, AA00);
AA01 = mulOp (negOp(tmp), AA11);
AA02 = fmnaOp (tmp, AA12, AA02);
AA03 = fmnaOp (tmp, AA13, AA03);
AA04 = fmnaOp (tmp, AA14, AA04);
AA05 = fmnaOp (tmp, AA15, AA05);
tmp = AA21;
AA20 = fmnaOp (tmp, AA10, AA20);
AA21 = mulOp (negOp(tmp), AA11);
AA22 = fmnaOp (tmp, AA12, AA22);
AA23 = fmnaOp (tmp, AA13, AA23);
AA24 = fmnaOp (tmp, AA14, AA24);
AA25 = fmnaOp (tmp, AA15, AA25);
tmp = AA31;
AA30 = fmnaOp (tmp, AA10, AA30);
AA31 = mulOp (negOp(tmp), AA11);
AA32 = fmnaOp (tmp, AA12, AA32);
AA33 = fmnaOp (tmp, AA13, AA33);
AA34 = fmnaOp (tmp, AA14, AA34);
AA35 = fmnaOp (tmp, AA15, AA35);
tmp = AA41;
AA40 = fmnaOp (tmp, AA10, AA40);
AA41 = mulOp (negOp(tmp), AA11);
AA42 = fmnaOp (tmp, AA12, AA42);
AA43 = fmnaOp (tmp, AA13, AA43);
AA44 = fmnaOp (tmp, AA14, AA44);
AA45 = fmnaOp (tmp, AA15, AA45);
tmp = AA51;
AA50 = fmnaOp (tmp, AA10, AA50);
AA51 = mulOp (negOp(tmp), AA11);
AA52 = fmnaOp (tmp, AA12, AA52);
AA53 = fmnaOp (tmp, AA13, AA53);
AA54 = fmnaOp (tmp, AA14, AA54);
AA55 = fmnaOp (tmp, AA15, AA55);
/****************** iteration 2 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA22);
pvt = 2;
t = absOp (AA32);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA42);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA52);
if (t > p) { p = t; pvt = 5; }
/* swap pivot row with row 2 */
if (pvt == 3) {
tmp = AA20; AA20 = AA30; AA30 = tmp;
tmp = AA21; AA21 = AA31; AA31 = tmp;
tmp = AA22; AA22 = AA32; AA32 = tmp;
tmp = AA23; AA23 = AA33; AA33 = tmp;
tmp = AA24; AA24 = AA34; AA34 = tmp;
tmp = AA25; AA25 = AA35; AA35 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA20; AA20 = AA40; AA40 = tmp;
tmp = AA21; AA21 = AA41; AA41 = tmp;
tmp = AA22; AA22 = AA42; AA42 = tmp;
tmp = AA23; AA23 = AA43; AA43 = tmp;
tmp = AA24; AA24 = AA44; AA44 = tmp;
tmp = AA25; AA25 = AA45; AA45 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA20; AA20 = AA50; AA50 = tmp;
tmp = AA21; AA21 = AA51; AA51 = tmp;
tmp = AA22; AA22 = AA52; AA52 = tmp;
tmp = AA23; AA23 = AA53; AA53 = tmp;
tmp = AA24; AA24 = AA54; AA54 = tmp;
tmp = AA25; AA25 = AA55; AA55 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm5; perm5 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA22);
icol2 = perm2;
AA20 = mulOp (tmp, AA20);
AA21 = mulOp (tmp, AA21);
AA22 = tmp;
AA23 = mulOp (tmp, AA23);
AA24 = mulOp (tmp, AA24);
AA25 = mulOp (tmp, AA25);
/* eliminate above and below current row */
tmp = AA02;
AA00 = fmnaOp (tmp, AA20, AA00);
AA01 = fmnaOp (tmp, AA21, AA01);
AA02 = mulOp (negOp(tmp), AA22);
AA03 = fmnaOp (tmp, AA23, AA03);
AA04 = fmnaOp (tmp, AA24, AA04);
AA05 = fmnaOp (tmp, AA25, AA05);
tmp = AA12;
AA10 = fmnaOp (tmp, AA20, AA10);
AA11 = fmnaOp (tmp, AA21, AA11);
AA12 = mulOp (negOp(tmp), AA22);
AA13 = fmnaOp (tmp, AA23, AA13);
AA14 = fmnaOp (tmp, AA24, AA14);
AA15 = fmnaOp (tmp, AA25, AA15);
tmp = AA32;
AA30 = fmnaOp (tmp, AA20, AA30);
AA31 = fmnaOp (tmp, AA21, AA31);
AA32 = mulOp (negOp(tmp), AA22);
AA33 = fmnaOp (tmp, AA23, AA33);
AA34 = fmnaOp (tmp, AA24, AA34);
AA35 = fmnaOp (tmp, AA25, AA35);
tmp = AA42;
AA40 = fmnaOp (tmp, AA20, AA40);
AA41 = fmnaOp (tmp, AA21, AA41);
AA42 = mulOp (negOp(tmp), AA22);
AA43 = fmnaOp (tmp, AA23, AA43);
AA44 = fmnaOp (tmp, AA24, AA44);
AA45 = fmnaOp (tmp, AA25, AA45);
tmp = AA52;
AA50 = fmnaOp (tmp, AA20, AA50);
AA51 = fmnaOp (tmp, AA21, AA51);
AA52 = mulOp (negOp(tmp), AA22);
AA53 = fmnaOp (tmp, AA23, AA53);
AA54 = fmnaOp (tmp, AA24, AA54);
AA55 = fmnaOp (tmp, AA25, AA55);
/****************** iteration 3 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA33);
pvt = 3;
t = absOp (AA43);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA53);
if (t > p) { p = t; pvt = 5; }
/* swap pivot row with row 3 */
if (pvt == 4) {
tmp = AA30; AA30 = AA40; AA40 = tmp;
tmp = AA31; AA31 = AA41; AA41 = tmp;
tmp = AA32; AA32 = AA42; AA42 = tmp;
tmp = AA33; AA33 = AA43; AA43 = tmp;
tmp = AA34; AA34 = AA44; AA44 = tmp;
tmp = AA35; AA35 = AA45; AA45 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA30; AA30 = AA50; AA50 = tmp;
tmp = AA31; AA31 = AA51; AA51 = tmp;
tmp = AA32; AA32 = AA52; AA52 = tmp;
tmp = AA33; AA33 = AA53; AA53 = tmp;
tmp = AA34; AA34 = AA54; AA54 = tmp;
tmp = AA35; AA35 = AA55; AA55 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm5; perm5 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA33);
icol3 = perm3;
AA30 = mulOp (tmp, AA30);
AA31 = mulOp (tmp, AA31);
AA32 = mulOp (tmp, AA32);
AA33 = tmp;
AA34 = mulOp (tmp, AA34);
AA35 = mulOp (tmp, AA35);
/* eliminate above and below current row */
tmp = AA03;
AA00 = fmnaOp (tmp, AA30, AA00);
AA01 = fmnaOp (tmp, AA31, AA01);
AA02 = fmnaOp (tmp, AA32, AA02);
AA03 = mulOp (negOp(tmp), AA33);
AA04 = fmnaOp (tmp, AA34, AA04);
AA05 = fmnaOp (tmp, AA35, AA05);
tmp = AA13;
AA10 = fmnaOp (tmp, AA30, AA10);
AA11 = fmnaOp (tmp, AA31, AA11);
AA12 = fmnaOp (tmp, AA32, AA12);
AA13 = mulOp (negOp(tmp), AA33);
AA14 = fmnaOp (tmp, AA34, AA14);
AA15 = fmnaOp (tmp, AA35, AA15);
tmp = AA23;
AA20 = fmnaOp (tmp, AA30, AA20);
AA21 = fmnaOp (tmp, AA31, AA21);
AA22 = fmnaOp (tmp, AA32, AA22);
AA23 = mulOp (negOp(tmp), AA33);
AA24 = fmnaOp (tmp, AA34, AA24);
AA25 = fmnaOp (tmp, AA35, AA25);
tmp = AA43;
AA40 = fmnaOp (tmp, AA30, AA40);
AA41 = fmnaOp (tmp, AA31, AA41);
AA42 = fmnaOp (tmp, AA32, AA42);
AA43 = mulOp (negOp(tmp), AA33);
AA44 = fmnaOp (tmp, AA34, AA44);
AA45 = fmnaOp (tmp, AA35, AA45);
tmp = AA53;
AA50 = fmnaOp (tmp, AA30, AA50);
AA51 = fmnaOp (tmp, AA31, AA51);
AA52 = fmnaOp (tmp, AA32, AA52);
AA53 = mulOp (negOp(tmp), AA33);
AA54 = fmnaOp (tmp, AA34, AA54);
AA55 = fmnaOp (tmp, AA35, AA55);
/****************** iteration 4 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA44);
pvt = 4;
t = absOp (AA54);
if (t > p) { p = t; pvt = 5; }
/* swap pivot row with row 4 */
if (pvt == 5) {
tmp = AA40; AA40 = AA50; AA50 = tmp;
tmp = AA41; AA41 = AA51; AA51 = tmp;
tmp = AA42; AA42 = AA52; AA52 = tmp;
tmp = AA43; AA43 = AA53; AA53 = tmp;
tmp = AA44; AA44 = AA54; AA54 = tmp;
tmp = AA45; AA45 = AA55; AA55 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm5; perm5 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA44);
icol4 = perm4;
AA40 = mulOp (tmp, AA40);
AA41 = mulOp (tmp, AA41);
AA42 = mulOp (tmp, AA42);
AA43 = mulOp (tmp, AA43);
AA44 = tmp;
AA45 = mulOp (tmp, AA45);
/* eliminate above and below current row */
tmp = AA04;
AA00 = fmnaOp (tmp, AA40, AA00);
AA01 = fmnaOp (tmp, AA41, AA01);
AA02 = fmnaOp (tmp, AA42, AA02);
AA03 = fmnaOp (tmp, AA43, AA03);
AA04 = mulOp (negOp(tmp), AA44);
AA05 = fmnaOp (tmp, AA45, AA05);
tmp = AA14;
AA10 = fmnaOp (tmp, AA40, AA10);
AA11 = fmnaOp (tmp, AA41, AA11);
AA12 = fmnaOp (tmp, AA42, AA12);
AA13 = fmnaOp (tmp, AA43, AA13);
AA14 = mulOp (negOp(tmp), AA44);
AA15 = fmnaOp (tmp, AA45, AA15);
tmp = AA24;
AA20 = fmnaOp (tmp, AA40, AA20);
AA21 = fmnaOp (tmp, AA41, AA21);
AA22 = fmnaOp (tmp, AA42, AA22);
AA23 = fmnaOp (tmp, AA43, AA23);
AA24 = mulOp (negOp(tmp), AA44);
AA25 = fmnaOp (tmp, AA45, AA25);
tmp = AA34;
AA30 = fmnaOp (tmp, AA40, AA30);
AA31 = fmnaOp (tmp, AA41, AA31);
AA32 = fmnaOp (tmp, AA42, AA32);
AA33 = fmnaOp (tmp, AA43, AA33);
AA34 = mulOp (negOp(tmp), AA44);
AA35 = fmnaOp (tmp, AA45, AA35);
tmp = AA54;
AA50 = fmnaOp (tmp, AA40, AA50);
AA51 = fmnaOp (tmp, AA41, AA51);
AA52 = fmnaOp (tmp, AA42, AA52);
AA53 = fmnaOp (tmp, AA43, AA53);
AA54 = mulOp (negOp(tmp), AA44);
AA55 = fmnaOp (tmp, AA45, AA55);
/****************** iteration 5 ****************/
/* scale current row */
tmp = rcpOp (AA55);
icol5 = perm5;
AA50 = mulOp (tmp, AA50);
AA51 = mulOp (tmp, AA51);
AA52 = mulOp (tmp, AA52);
AA53 = mulOp (tmp, AA53);
AA54 = mulOp (tmp, AA54);
AA55 = tmp;
/* eliminate above and below current row */
tmp = AA05;
AA00 = fmnaOp (tmp, AA50, AA00);
AA01 = fmnaOp (tmp, AA51, AA01);
AA02 = fmnaOp (tmp, AA52, AA02);
AA03 = fmnaOp (tmp, AA53, AA03);
AA04 = fmnaOp (tmp, AA54, AA04);
AA05 = mulOp (negOp(tmp), AA55);
tmp = AA15;
AA10 = fmnaOp (tmp, AA50, AA10);
AA11 = fmnaOp (tmp, AA51, AA11);
AA12 = fmnaOp (tmp, AA52, AA12);
AA13 = fmnaOp (tmp, AA53, AA13);
AA14 = fmnaOp (tmp, AA54, AA14);
AA15 = mulOp (negOp(tmp), AA55);
tmp = AA25;
AA20 = fmnaOp (tmp, AA50, AA20);
AA21 = fmnaOp (tmp, AA51, AA21);
AA22 = fmnaOp (tmp, AA52, AA22);
AA23 = fmnaOp (tmp, AA53, AA23);
AA24 = fmnaOp (tmp, AA54, AA24);
AA25 = mulOp (negOp(tmp), AA55);
tmp = AA35;
AA30 = fmnaOp (tmp, AA50, AA30);
AA31 = fmnaOp (tmp, AA51, AA31);
AA32 = fmnaOp (tmp, AA52, AA32);
AA33 = fmnaOp (tmp, AA53, AA33);
AA34 = fmnaOp (tmp, AA54, AA34);
AA35 = mulOp (negOp(tmp), AA55);
tmp = AA45;
AA40 = fmnaOp (tmp, AA50, AA40);
AA41 = fmnaOp (tmp, AA51, AA41);
AA42 = fmnaOp (tmp, AA52, AA42);
AA43 = fmnaOp (tmp, AA53, AA43);
AA44 = fmnaOp (tmp, AA54, AA44);
AA45 = mulOp (negOp(tmp), AA55);
/* sort columns into the correct order */
Ainv(0,icol0) = AA00;
Ainv(1,icol0) = AA10;
Ainv(2,icol0) = AA20;
Ainv(3,icol0) = AA30;
Ainv(4,icol0) = AA40;
Ainv(5,icol0) = AA50;
Ainv(0,icol1) = AA01;
Ainv(1,icol1) = AA11;
Ainv(2,icol1) = AA21;
Ainv(3,icol1) = AA31;
Ainv(4,icol1) = AA41;
Ainv(5,icol1) = AA51;
Ainv(0,icol2) = AA02;
Ainv(1,icol2) = AA12;
Ainv(2,icol2) = AA22;
Ainv(3,icol2) = AA32;
Ainv(4,icol2) = AA42;
Ainv(5,icol2) = AA52;
Ainv(0,icol3) = AA03;
Ainv(1,icol3) = AA13;
Ainv(2,icol3) = AA23;
Ainv(3,icol3) = AA33;
Ainv(4,icol3) = AA43;
Ainv(5,icol3) = AA53;
Ainv(0,icol4) = AA04;
Ainv(1,icol4) = AA14;
Ainv(2,icol4) = AA24;
Ainv(3,icol4) = AA34;
Ainv(4,icol4) = AA44;
Ainv(5,icol4) = AA54;
Ainv(0,icol5) = AA05;
Ainv(1,icol5) = AA15;
Ainv(2,icol5) = AA25;
Ainv(3,icol5) = AA35;
Ainv(4,icol5) = AA45;
Ainv(5,icol5) = AA55;
}
}
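/*
 * Editor's note: each of these kernels keeps the entire N x N matrix in
 * per-thread registers (36 values for the 6x6 variant above, 49 for the 7x7
 * variant that follows), which is what restricts the one-matrix-per-thread
 * approach to the small dimensions advertised by matInvMinDim/matInvMaxDim in
 * the config tables above.
 */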
template<typename T, int arch>
__global__ void matinv_7x7_matrix_per_thread (const T *A, T *Ainv, int batch)
{
const int blkNum = blockIdx.y * gridDim.x + blockIdx.x;
const int thrdNum = blkNum * blockDim.x + threadIdx.x;
const int N = 7;
int perm0, perm1, perm2, perm3, perm4, perm5, perm6;
int icol0, icol1, icol2, icol3, icol4, icol5, icol6;
T AA00, AA01, AA02, AA03, AA04, AA05, AA06;
T AA10, AA11, AA12, AA13, AA14, AA15, AA16;
T AA20, AA21, AA22, AA23, AA24, AA25, AA26;
T AA30, AA31, AA32, AA33, AA34, AA35, AA36;
T AA40, AA41, AA42, AA43, AA44, AA45, AA46;
T AA50, AA51, AA52, AA53, AA54, AA55, AA56;
T AA60, AA61, AA62, AA63, AA64, AA65, AA66;
T tmp;
#if USE_PIVOTING
typename config<T,arch>::absValType t;
typename config<T,arch>::absValType p;
int i, pvt;
#endif
A += thrdNum * N * N;
Ainv += thrdNum * N * N;
if (thrdNum < batch) {
AA00 = A[0];
AA10 = A[1];
AA20 = A[2];
AA30 = A[3];
AA40 = A[4];
AA50 = A[5];
AA60 = A[6];
AA01 = A[7];
AA11 = A[8];
AA21 = A[9];
AA31 = A[10];
AA41 = A[11];
AA51 = A[12];
AA61 = A[13];
AA02 = A[14];
AA12 = A[15];
AA22 = A[16];
AA32 = A[17];
AA42 = A[18];
AA52 = A[19];
AA62 = A[20];
AA03 = A[21];
AA13 = A[22];
AA23 = A[23];
AA33 = A[24];
AA43 = A[25];
AA53 = A[26];
AA63 = A[27];
AA04 = A[28];
AA14 = A[29];
AA24 = A[30];
AA34 = A[31];
AA44 = A[32];
AA54 = A[33];
AA64 = A[34];
AA05 = A[35];
AA15 = A[36];
AA25 = A[37];
AA35 = A[38];
AA45 = A[39];
AA55 = A[40];
AA65 = A[41];
AA06 = A[42];
AA16 = A[43];
AA26 = A[44];
AA36 = A[45];
AA46 = A[46];
AA56 = A[47];
AA66 = A[48];
perm0 = 0;
perm1 = 1;
perm2 = 2;
perm3 = 3;
perm4 = 4;
perm5 = 5;
perm6 = 6;
/****************** iteration 0 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA00);
pvt = 0;
t = absOp (AA10);
if (t > p) { p = t; pvt = 1; }
t = absOp (AA20);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA30);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA40);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA50);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA60);
if (t > p) { p = t; pvt = 6; }
/* swap pivot row with row 0 */
if (pvt == 1) {
tmp = AA00; AA00 = AA10; AA10 = tmp;
tmp = AA01; AA01 = AA11; AA11 = tmp;
tmp = AA02; AA02 = AA12; AA12 = tmp;
tmp = AA03; AA03 = AA13; AA13 = tmp;
tmp = AA04; AA04 = AA14; AA14 = tmp;
tmp = AA05; AA05 = AA15; AA15 = tmp;
tmp = AA06; AA06 = AA16; AA16 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm1; perm1 = i;
}
if (pvt == 2) {
tmp = AA00; AA00 = AA20; AA20 = tmp;
tmp = AA01; AA01 = AA21; AA21 = tmp;
tmp = AA02; AA02 = AA22; AA22 = tmp;
tmp = AA03; AA03 = AA23; AA23 = tmp;
tmp = AA04; AA04 = AA24; AA24 = tmp;
tmp = AA05; AA05 = AA25; AA25 = tmp;
tmp = AA06; AA06 = AA26; AA26 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA00; AA00 = AA30; AA30 = tmp;
tmp = AA01; AA01 = AA31; AA31 = tmp;
tmp = AA02; AA02 = AA32; AA32 = tmp;
tmp = AA03; AA03 = AA33; AA33 = tmp;
tmp = AA04; AA04 = AA34; AA34 = tmp;
tmp = AA05; AA05 = AA35; AA35 = tmp;
tmp = AA06; AA06 = AA36; AA36 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA00; AA00 = AA40; AA40 = tmp;
tmp = AA01; AA01 = AA41; AA41 = tmp;
tmp = AA02; AA02 = AA42; AA42 = tmp;
tmp = AA03; AA03 = AA43; AA43 = tmp;
tmp = AA04; AA04 = AA44; AA44 = tmp;
tmp = AA05; AA05 = AA45; AA45 = tmp;
tmp = AA06; AA06 = AA46; AA46 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA00; AA00 = AA50; AA50 = tmp;
tmp = AA01; AA01 = AA51; AA51 = tmp;
tmp = AA02; AA02 = AA52; AA52 = tmp;
tmp = AA03; AA03 = AA53; AA53 = tmp;
tmp = AA04; AA04 = AA54; AA54 = tmp;
tmp = AA05; AA05 = AA55; AA55 = tmp;
tmp = AA06; AA06 = AA56; AA56 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA00; AA00 = AA60; AA60 = tmp;
tmp = AA01; AA01 = AA61; AA61 = tmp;
tmp = AA02; AA02 = AA62; AA62 = tmp;
tmp = AA03; AA03 = AA63; AA63 = tmp;
tmp = AA04; AA04 = AA64; AA64 = tmp;
tmp = AA05; AA05 = AA65; AA65 = tmp;
tmp = AA06; AA06 = AA66; AA66 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm6; perm6 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA00);
icol0 = perm0;
AA00 = tmp;
AA01 = mulOp (tmp, AA01);
AA02 = mulOp (tmp, AA02);
AA03 = mulOp (tmp, AA03);
AA04 = mulOp (tmp, AA04);
AA05 = mulOp (tmp, AA05);
AA06 = mulOp (tmp, AA06);
/* eliminate above and below current row */
tmp = AA10;
AA10 = mulOp (negOp(tmp), AA00);
AA11 = fmnaOp (tmp, AA01, AA11);
AA12 = fmnaOp (tmp, AA02, AA12);
AA13 = fmnaOp (tmp, AA03, AA13);
AA14 = fmnaOp (tmp, AA04, AA14);
AA15 = fmnaOp (tmp, AA05, AA15);
AA16 = fmnaOp (tmp, AA06, AA16);
tmp = AA20;
AA20 = mulOp (negOp(tmp), AA00);
AA21 = fmnaOp (tmp, AA01, AA21);
AA22 = fmnaOp (tmp, AA02, AA22);
AA23 = fmnaOp (tmp, AA03, AA23);
AA24 = fmnaOp (tmp, AA04, AA24);
AA25 = fmnaOp (tmp, AA05, AA25);
AA26 = fmnaOp (tmp, AA06, AA26);
tmp = AA30;
AA30 = mulOp (negOp(tmp), AA00);
AA31 = fmnaOp (tmp, AA01, AA31);
AA32 = fmnaOp (tmp, AA02, AA32);
AA33 = fmnaOp (tmp, AA03, AA33);
AA34 = fmnaOp (tmp, AA04, AA34);
AA35 = fmnaOp (tmp, AA05, AA35);
AA36 = fmnaOp (tmp, AA06, AA36);
tmp = AA40;
AA40 = mulOp (negOp(tmp), AA00);
AA41 = fmnaOp (tmp, AA01, AA41);
AA42 = fmnaOp (tmp, AA02, AA42);
AA43 = fmnaOp (tmp, AA03, AA43);
AA44 = fmnaOp (tmp, AA04, AA44);
AA45 = fmnaOp (tmp, AA05, AA45);
AA46 = fmnaOp (tmp, AA06, AA46);
tmp = AA50;
AA50 = mulOp (negOp(tmp), AA00);
AA51 = fmnaOp (tmp, AA01, AA51);
AA52 = fmnaOp (tmp, AA02, AA52);
AA53 = fmnaOp (tmp, AA03, AA53);
AA54 = fmnaOp (tmp, AA04, AA54);
AA55 = fmnaOp (tmp, AA05, AA55);
AA56 = fmnaOp (tmp, AA06, AA56);
tmp = AA60;
AA60 = mulOp (negOp(tmp), AA00);
AA61 = fmnaOp (tmp, AA01, AA61);
AA62 = fmnaOp (tmp, AA02, AA62);
AA63 = fmnaOp (tmp, AA03, AA63);
AA64 = fmnaOp (tmp, AA04, AA64);
AA65 = fmnaOp (tmp, AA05, AA65);
AA66 = fmnaOp (tmp, AA06, AA66);
/****************** iteration 1 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA11);
pvt = 1;
t = absOp (AA21);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA31);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA41);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA51);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA61);
if (t > p) { p = t; pvt = 6; }
/* swap pivot row with row 1 */
if (pvt == 2) {
tmp = AA10; AA10 = AA20; AA20 = tmp;
tmp = AA11; AA11 = AA21; AA21 = tmp;
tmp = AA12; AA12 = AA22; AA22 = tmp;
tmp = AA13; AA13 = AA23; AA23 = tmp;
tmp = AA14; AA14 = AA24; AA24 = tmp;
tmp = AA15; AA15 = AA25; AA25 = tmp;
tmp = AA16; AA16 = AA26; AA26 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA10; AA10 = AA30; AA30 = tmp;
tmp = AA11; AA11 = AA31; AA31 = tmp;
tmp = AA12; AA12 = AA32; AA32 = tmp;
tmp = AA13; AA13 = AA33; AA33 = tmp;
tmp = AA14; AA14 = AA34; AA34 = tmp;
tmp = AA15; AA15 = AA35; AA35 = tmp;
tmp = AA16; AA16 = AA36; AA36 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA10; AA10 = AA40; AA40 = tmp;
tmp = AA11; AA11 = AA41; AA41 = tmp;
tmp = AA12; AA12 = AA42; AA42 = tmp;
tmp = AA13; AA13 = AA43; AA43 = tmp;
tmp = AA14; AA14 = AA44; AA44 = tmp;
tmp = AA15; AA15 = AA45; AA45 = tmp;
tmp = AA16; AA16 = AA46; AA46 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA10; AA10 = AA50; AA50 = tmp;
tmp = AA11; AA11 = AA51; AA51 = tmp;
tmp = AA12; AA12 = AA52; AA52 = tmp;
tmp = AA13; AA13 = AA53; AA53 = tmp;
tmp = AA14; AA14 = AA54; AA54 = tmp;
tmp = AA15; AA15 = AA55; AA55 = tmp;
tmp = AA16; AA16 = AA56; AA56 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA10; AA10 = AA60; AA60 = tmp;
tmp = AA11; AA11 = AA61; AA61 = tmp;
tmp = AA12; AA12 = AA62; AA62 = tmp;
tmp = AA13; AA13 = AA63; AA63 = tmp;
tmp = AA14; AA14 = AA64; AA64 = tmp;
tmp = AA15; AA15 = AA65; AA65 = tmp;
tmp = AA16; AA16 = AA66; AA66 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm6; perm6 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA11);
icol1 = perm1;
AA10 = mulOp (tmp, AA10);
AA11 = tmp;
AA12 = mulOp (tmp, AA12);
AA13 = mulOp (tmp, AA13);
AA14 = mulOp (tmp, AA14);
AA15 = mulOp (tmp, AA15);
AA16 = mulOp (tmp, AA16);
/* eliminate above and below current row */
tmp = AA01;
AA00 = fmnaOp (tmp, AA10, AA00);
AA01 = mulOp (negOp(tmp), AA11);
AA02 = fmnaOp (tmp, AA12, AA02);
AA03 = fmnaOp (tmp, AA13, AA03);
AA04 = fmnaOp (tmp, AA14, AA04);
AA05 = fmnaOp (tmp, AA15, AA05);
AA06 = fmnaOp (tmp, AA16, AA06);
tmp = AA21;
AA20 = fmnaOp (tmp, AA10, AA20);
AA21 = mulOp (negOp(tmp), AA11);
AA22 = fmnaOp (tmp, AA12, AA22);
AA23 = fmnaOp (tmp, AA13, AA23);
AA24 = fmnaOp (tmp, AA14, AA24);
AA25 = fmnaOp (tmp, AA15, AA25);
AA26 = fmnaOp (tmp, AA16, AA26);
tmp = AA31;
AA30 = fmnaOp (tmp, AA10, AA30);
AA31 = mulOp (negOp(tmp), AA11);
AA32 = fmnaOp (tmp, AA12, AA32);
AA33 = fmnaOp (tmp, AA13, AA33);
AA34 = fmnaOp (tmp, AA14, AA34);
AA35 = fmnaOp (tmp, AA15, AA35);
AA36 = fmnaOp (tmp, AA16, AA36);
tmp = AA41;
AA40 = fmnaOp (tmp, AA10, AA40);
AA41 = mulOp (negOp(tmp), AA11);
AA42 = fmnaOp (tmp, AA12, AA42);
AA43 = fmnaOp (tmp, AA13, AA43);
AA44 = fmnaOp (tmp, AA14, AA44);
AA45 = fmnaOp (tmp, AA15, AA45);
AA46 = fmnaOp (tmp, AA16, AA46);
tmp = AA51;
AA50 = fmnaOp (tmp, AA10, AA50);
AA51 = mulOp (negOp(tmp), AA11);
AA52 = fmnaOp (tmp, AA12, AA52);
AA53 = fmnaOp (tmp, AA13, AA53);
AA54 = fmnaOp (tmp, AA14, AA54);
AA55 = fmnaOp (tmp, AA15, AA55);
AA56 = fmnaOp (tmp, AA16, AA56);
tmp = AA61;
AA60 = fmnaOp (tmp, AA10, AA60);
AA61 = mulOp (negOp(tmp), AA11);
AA62 = fmnaOp (tmp, AA12, AA62);
AA63 = fmnaOp (tmp, AA13, AA63);
AA64 = fmnaOp (tmp, AA14, AA64);
AA65 = fmnaOp (tmp, AA15, AA65);
AA66 = fmnaOp (tmp, AA16, AA66);
/****************** iteration 2 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA22);
pvt = 2;
t = absOp (AA32);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA42);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA52);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA62);
if (t > p) { p = t; pvt = 6; }
/* swap pivot row with row 2 */
if (pvt == 3) {
tmp = AA20; AA20 = AA30; AA30 = tmp;
tmp = AA21; AA21 = AA31; AA31 = tmp;
tmp = AA22; AA22 = AA32; AA32 = tmp;
tmp = AA23; AA23 = AA33; AA33 = tmp;
tmp = AA24; AA24 = AA34; AA34 = tmp;
tmp = AA25; AA25 = AA35; AA35 = tmp;
tmp = AA26; AA26 = AA36; AA36 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA20; AA20 = AA40; AA40 = tmp;
tmp = AA21; AA21 = AA41; AA41 = tmp;
tmp = AA22; AA22 = AA42; AA42 = tmp;
tmp = AA23; AA23 = AA43; AA43 = tmp;
tmp = AA24; AA24 = AA44; AA44 = tmp;
tmp = AA25; AA25 = AA45; AA45 = tmp;
tmp = AA26; AA26 = AA46; AA46 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA20; AA20 = AA50; AA50 = tmp;
tmp = AA21; AA21 = AA51; AA51 = tmp;
tmp = AA22; AA22 = AA52; AA52 = tmp;
tmp = AA23; AA23 = AA53; AA53 = tmp;
tmp = AA24; AA24 = AA54; AA54 = tmp;
tmp = AA25; AA25 = AA55; AA55 = tmp;
tmp = AA26; AA26 = AA56; AA56 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA20; AA20 = AA60; AA60 = tmp;
tmp = AA21; AA21 = AA61; AA61 = tmp;
tmp = AA22; AA22 = AA62; AA62 = tmp;
tmp = AA23; AA23 = AA63; AA63 = tmp;
tmp = AA24; AA24 = AA64; AA64 = tmp;
tmp = AA25; AA25 = AA65; AA65 = tmp;
tmp = AA26; AA26 = AA66; AA66 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm6; perm6 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA22);
icol2 = perm2;
AA20 = mulOp (tmp, AA20);
AA21 = mulOp (tmp, AA21);
AA22 = tmp;
AA23 = mulOp (tmp, AA23);
AA24 = mulOp (tmp, AA24);
AA25 = mulOp (tmp, AA25);
AA26 = mulOp (tmp, AA26);
/* eliminate above and below current row */
tmp = AA02;
AA00 = fmnaOp (tmp, AA20, AA00);
AA01 = fmnaOp (tmp, AA21, AA01);
AA02 = mulOp (negOp(tmp), AA22);
AA03 = fmnaOp (tmp, AA23, AA03);
AA04 = fmnaOp (tmp, AA24, AA04);
AA05 = fmnaOp (tmp, AA25, AA05);
AA06 = fmnaOp (tmp, AA26, AA06);
tmp = AA12;
AA10 = fmnaOp (tmp, AA20, AA10);
AA11 = fmnaOp (tmp, AA21, AA11);
AA12 = mulOp (negOp(tmp), AA22);
AA13 = fmnaOp (tmp, AA23, AA13);
AA14 = fmnaOp (tmp, AA24, AA14);
AA15 = fmnaOp (tmp, AA25, AA15);
AA16 = fmnaOp (tmp, AA26, AA16);
tmp = AA32;
AA30 = fmnaOp (tmp, AA20, AA30);
AA31 = fmnaOp (tmp, AA21, AA31);
AA32 = mulOp (negOp(tmp), AA22);
AA33 = fmnaOp (tmp, AA23, AA33);
AA34 = fmnaOp (tmp, AA24, AA34);
AA35 = fmnaOp (tmp, AA25, AA35);
AA36 = fmnaOp (tmp, AA26, AA36);
tmp = AA42;
AA40 = fmnaOp (tmp, AA20, AA40);
AA41 = fmnaOp (tmp, AA21, AA41);
AA42 = mulOp (negOp(tmp), AA22);
AA43 = fmnaOp (tmp, AA23, AA43);
AA44 = fmnaOp (tmp, AA24, AA44);
AA45 = fmnaOp (tmp, AA25, AA45);
AA46 = fmnaOp (tmp, AA26, AA46);
tmp = AA52;
AA50 = fmnaOp (tmp, AA20, AA50);
AA51 = fmnaOp (tmp, AA21, AA51);
AA52 = mulOp (negOp(tmp), AA22);
AA53 = fmnaOp (tmp, AA23, AA53);
AA54 = fmnaOp (tmp, AA24, AA54);
AA55 = fmnaOp (tmp, AA25, AA55);
AA56 = fmnaOp (tmp, AA26, AA56);
tmp = AA62;
AA60 = fmnaOp (tmp, AA20, AA60);
AA61 = fmnaOp (tmp, AA21, AA61);
AA62 = mulOp (negOp(tmp), AA22);
AA63 = fmnaOp (tmp, AA23, AA63);
AA64 = fmnaOp (tmp, AA24, AA64);
AA65 = fmnaOp (tmp, AA25, AA65);
AA66 = fmnaOp (tmp, AA26, AA66);
/****************** iteration 3 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA33);
pvt = 3;
t = absOp (AA43);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA53);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA63);
if (t > p) { p = t; pvt = 6; }
/* swap pivot row with row 3 */
if (pvt == 4) {
tmp = AA30; AA30 = AA40; AA40 = tmp;
tmp = AA31; AA31 = AA41; AA41 = tmp;
tmp = AA32; AA32 = AA42; AA42 = tmp;
tmp = AA33; AA33 = AA43; AA43 = tmp;
tmp = AA34; AA34 = AA44; AA44 = tmp;
tmp = AA35; AA35 = AA45; AA45 = tmp;
tmp = AA36; AA36 = AA46; AA46 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA30; AA30 = AA50; AA50 = tmp;
tmp = AA31; AA31 = AA51; AA51 = tmp;
tmp = AA32; AA32 = AA52; AA52 = tmp;
tmp = AA33; AA33 = AA53; AA53 = tmp;
tmp = AA34; AA34 = AA54; AA54 = tmp;
tmp = AA35; AA35 = AA55; AA55 = tmp;
tmp = AA36; AA36 = AA56; AA56 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA30; AA30 = AA60; AA60 = tmp;
tmp = AA31; AA31 = AA61; AA61 = tmp;
tmp = AA32; AA32 = AA62; AA62 = tmp;
tmp = AA33; AA33 = AA63; AA63 = tmp;
tmp = AA34; AA34 = AA64; AA64 = tmp;
tmp = AA35; AA35 = AA65; AA65 = tmp;
tmp = AA36; AA36 = AA66; AA66 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm6; perm6 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA33);
icol3 = perm3;
AA30 = mulOp (tmp, AA30);
AA31 = mulOp (tmp, AA31);
AA32 = mulOp (tmp, AA32);
AA33 = tmp;
AA34 = mulOp (tmp, AA34);
AA35 = mulOp (tmp, AA35);
AA36 = mulOp (tmp, AA36);
/* eliminate above and below current row */
tmp = AA03;
AA00 = fmnaOp (tmp, AA30, AA00);
AA01 = fmnaOp (tmp, AA31, AA01);
AA02 = fmnaOp (tmp, AA32, AA02);
AA03 = mulOp (negOp(tmp), AA33);
AA04 = fmnaOp (tmp, AA34, AA04);
AA05 = fmnaOp (tmp, AA35, AA05);
AA06 = fmnaOp (tmp, AA36, AA06);
tmp = AA13;
AA10 = fmnaOp (tmp, AA30, AA10);
AA11 = fmnaOp (tmp, AA31, AA11);
AA12 = fmnaOp (tmp, AA32, AA12);
AA13 = mulOp (negOp(tmp), AA33);
AA14 = fmnaOp (tmp, AA34, AA14);
AA15 = fmnaOp (tmp, AA35, AA15);
AA16 = fmnaOp (tmp, AA36, AA16);
tmp = AA23;
AA20 = fmnaOp (tmp, AA30, AA20);
AA21 = fmnaOp (tmp, AA31, AA21);
AA22 = fmnaOp (tmp, AA32, AA22);
AA23 = mulOp (negOp(tmp), AA33);
AA24 = fmnaOp (tmp, AA34, AA24);
AA25 = fmnaOp (tmp, AA35, AA25);
AA26 = fmnaOp (tmp, AA36, AA26);
tmp = AA43;
AA40 = fmnaOp (tmp, AA30, AA40);
AA41 = fmnaOp (tmp, AA31, AA41);
AA42 = fmnaOp (tmp, AA32, AA42);
AA43 = mulOp (negOp(tmp), AA33);
AA44 = fmnaOp (tmp, AA34, AA44);
AA45 = fmnaOp (tmp, AA35, AA45);
AA46 = fmnaOp (tmp, AA36, AA46);
tmp = AA53;
AA50 = fmnaOp (tmp, AA30, AA50);
AA51 = fmnaOp (tmp, AA31, AA51);
AA52 = fmnaOp (tmp, AA32, AA52);
AA53 = mulOp (negOp(tmp), AA33);
AA54 = fmnaOp (tmp, AA34, AA54);
AA55 = fmnaOp (tmp, AA35, AA55);
AA56 = fmnaOp (tmp, AA36, AA56);
tmp = AA63;
AA60 = fmnaOp (tmp, AA30, AA60);
AA61 = fmnaOp (tmp, AA31, AA61);
AA62 = fmnaOp (tmp, AA32, AA62);
AA63 = mulOp (negOp(tmp), AA33);
AA64 = fmnaOp (tmp, AA34, AA64);
AA65 = fmnaOp (tmp, AA35, AA65);
AA66 = fmnaOp (tmp, AA36, AA66);
/****************** iteration 4 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA44);
pvt = 4;
t = absOp (AA54);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA64);
if (t > p) { p = t; pvt = 6; }
/* swap pivot row with row 4 */
if (pvt == 5) {
tmp = AA40; AA40 = AA50; AA50 = tmp;
tmp = AA41; AA41 = AA51; AA51 = tmp;
tmp = AA42; AA42 = AA52; AA52 = tmp;
tmp = AA43; AA43 = AA53; AA53 = tmp;
tmp = AA44; AA44 = AA54; AA54 = tmp;
tmp = AA45; AA45 = AA55; AA55 = tmp;
tmp = AA46; AA46 = AA56; AA56 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA40; AA40 = AA60; AA60 = tmp;
tmp = AA41; AA41 = AA61; AA61 = tmp;
tmp = AA42; AA42 = AA62; AA62 = tmp;
tmp = AA43; AA43 = AA63; AA63 = tmp;
tmp = AA44; AA44 = AA64; AA64 = tmp;
tmp = AA45; AA45 = AA65; AA65 = tmp;
tmp = AA46; AA46 = AA66; AA66 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm6; perm6 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA44);
icol4 = perm4;
AA40 = mulOp (tmp, AA40);
AA41 = mulOp (tmp, AA41);
AA42 = mulOp (tmp, AA42);
AA43 = mulOp (tmp, AA43);
AA44 = tmp;
AA45 = mulOp (tmp, AA45);
AA46 = mulOp (tmp, AA46);
/* eliminate above and below current row */
tmp = AA04;
AA00 = fmnaOp (tmp, AA40, AA00);
AA01 = fmnaOp (tmp, AA41, AA01);
AA02 = fmnaOp (tmp, AA42, AA02);
AA03 = fmnaOp (tmp, AA43, AA03);
AA04 = mulOp (negOp(tmp), AA44);
AA05 = fmnaOp (tmp, AA45, AA05);
AA06 = fmnaOp (tmp, AA46, AA06);
tmp = AA14;
AA10 = fmnaOp (tmp, AA40, AA10);
AA11 = fmnaOp (tmp, AA41, AA11);
AA12 = fmnaOp (tmp, AA42, AA12);
AA13 = fmnaOp (tmp, AA43, AA13);
AA14 = mulOp (negOp(tmp), AA44);
AA15 = fmnaOp (tmp, AA45, AA15);
AA16 = fmnaOp (tmp, AA46, AA16);
tmp = AA24;
AA20 = fmnaOp (tmp, AA40, AA20);
AA21 = fmnaOp (tmp, AA41, AA21);
AA22 = fmnaOp (tmp, AA42, AA22);
AA23 = fmnaOp (tmp, AA43, AA23);
AA24 = mulOp (negOp(tmp), AA44);
AA25 = fmnaOp (tmp, AA45, AA25);
AA26 = fmnaOp (tmp, AA46, AA26);
tmp = AA34;
AA30 = fmnaOp (tmp, AA40, AA30);
AA31 = fmnaOp (tmp, AA41, AA31);
AA32 = fmnaOp (tmp, AA42, AA32);
AA33 = fmnaOp (tmp, AA43, AA33);
AA34 = mulOp (negOp(tmp), AA44);
AA35 = fmnaOp (tmp, AA45, AA35);
AA36 = fmnaOp (tmp, AA46, AA36);
tmp = AA54;
AA50 = fmnaOp (tmp, AA40, AA50);
AA51 = fmnaOp (tmp, AA41, AA51);
AA52 = fmnaOp (tmp, AA42, AA52);
AA53 = fmnaOp (tmp, AA43, AA53);
AA54 = mulOp (negOp(tmp), AA44);
AA55 = fmnaOp (tmp, AA45, AA55);
AA56 = fmnaOp (tmp, AA46, AA56);
tmp = AA64;
AA60 = fmnaOp (tmp, AA40, AA60);
AA61 = fmnaOp (tmp, AA41, AA61);
AA62 = fmnaOp (tmp, AA42, AA62);
AA63 = fmnaOp (tmp, AA43, AA63);
AA64 = mulOp (negOp(tmp), AA44);
AA65 = fmnaOp (tmp, AA45, AA65);
AA66 = fmnaOp (tmp, AA46, AA66);
/****************** iteration 5 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA55);
pvt = 5;
t = absOp (AA65);
if (t > p) { p = t; pvt = 6; }
/* swap pivot row with row 5 */
if (pvt == 6) {
tmp = AA50; AA50 = AA60; AA60 = tmp;
tmp = AA51; AA51 = AA61; AA61 = tmp;
tmp = AA52; AA52 = AA62; AA62 = tmp;
tmp = AA53; AA53 = AA63; AA63 = tmp;
tmp = AA54; AA54 = AA64; AA64 = tmp;
tmp = AA55; AA55 = AA65; AA65 = tmp;
tmp = AA56; AA56 = AA66; AA66 = tmp;
/* update permutation vector based on row swap */
i = perm5; perm5 = perm6; perm6 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA55);
icol5 = perm5;
AA50 = mulOp (tmp, AA50);
AA51 = mulOp (tmp, AA51);
AA52 = mulOp (tmp, AA52);
AA53 = mulOp (tmp, AA53);
AA54 = mulOp (tmp, AA54);
AA55 = tmp;
AA56 = mulOp (tmp, AA56);
/* eliminate above and below current row */
tmp = AA05;
AA00 = fmnaOp (tmp, AA50, AA00);
AA01 = fmnaOp (tmp, AA51, AA01);
AA02 = fmnaOp (tmp, AA52, AA02);
AA03 = fmnaOp (tmp, AA53, AA03);
AA04 = fmnaOp (tmp, AA54, AA04);
AA05 = mulOp (negOp(tmp), AA55);
AA06 = fmnaOp (tmp, AA56, AA06);
tmp = AA15;
AA10 = fmnaOp (tmp, AA50, AA10);
AA11 = fmnaOp (tmp, AA51, AA11);
AA12 = fmnaOp (tmp, AA52, AA12);
AA13 = fmnaOp (tmp, AA53, AA13);
AA14 = fmnaOp (tmp, AA54, AA14);
AA15 = mulOp (negOp(tmp), AA55);
AA16 = fmnaOp (tmp, AA56, AA16);
tmp = AA25;
AA20 = fmnaOp (tmp, AA50, AA20);
AA21 = fmnaOp (tmp, AA51, AA21);
AA22 = fmnaOp (tmp, AA52, AA22);
AA23 = fmnaOp (tmp, AA53, AA23);
AA24 = fmnaOp (tmp, AA54, AA24);
AA25 = mulOp (negOp(tmp), AA55);
AA26 = fmnaOp (tmp, AA56, AA26);
tmp = AA35;
AA30 = fmnaOp (tmp, AA50, AA30);
AA31 = fmnaOp (tmp, AA51, AA31);
AA32 = fmnaOp (tmp, AA52, AA32);
AA33 = fmnaOp (tmp, AA53, AA33);
AA34 = fmnaOp (tmp, AA54, AA34);
AA35 = mulOp (negOp(tmp), AA55);
AA36 = fmnaOp (tmp, AA56, AA36);
tmp = AA45;
AA40 = fmnaOp (tmp, AA50, AA40);
AA41 = fmnaOp (tmp, AA51, AA41);
AA42 = fmnaOp (tmp, AA52, AA42);
AA43 = fmnaOp (tmp, AA53, AA43);
AA44 = fmnaOp (tmp, AA54, AA44);
AA45 = mulOp (negOp(tmp), AA55);
AA46 = fmnaOp (tmp, AA56, AA46);
tmp = AA65;
AA60 = fmnaOp (tmp, AA50, AA60);
AA61 = fmnaOp (tmp, AA51, AA61);
AA62 = fmnaOp (tmp, AA52, AA62);
AA63 = fmnaOp (tmp, AA53, AA63);
AA64 = fmnaOp (tmp, AA54, AA64);
AA65 = mulOp (negOp(tmp), AA55);
AA66 = fmnaOp (tmp, AA56, AA66);
/****************** iteration 6 ****************/
/* scale current row */
tmp = rcpOp (AA66);
icol6 = perm6;
AA60 = mulOp (tmp, AA60);
AA61 = mulOp (tmp, AA61);
AA62 = mulOp (tmp, AA62);
AA63 = mulOp (tmp, AA63);
AA64 = mulOp (tmp, AA64);
AA65 = mulOp (tmp, AA65);
AA66 = tmp;
/* eliminate above and below current row */
tmp = AA06;
AA00 = fmnaOp (tmp, AA60, AA00);
AA01 = fmnaOp (tmp, AA61, AA01);
AA02 = fmnaOp (tmp, AA62, AA02);
AA03 = fmnaOp (tmp, AA63, AA03);
AA04 = fmnaOp (tmp, AA64, AA04);
AA05 = fmnaOp (tmp, AA65, AA05);
AA06 = mulOp (negOp(tmp), AA66);
tmp = AA16;
AA10 = fmnaOp (tmp, AA60, AA10);
AA11 = fmnaOp (tmp, AA61, AA11);
AA12 = fmnaOp (tmp, AA62, AA12);
AA13 = fmnaOp (tmp, AA63, AA13);
AA14 = fmnaOp (tmp, AA64, AA14);
AA15 = fmnaOp (tmp, AA65, AA15);
AA16 = mulOp (negOp(tmp), AA66);
tmp = AA26;
AA20 = fmnaOp (tmp, AA60, AA20);
AA21 = fmnaOp (tmp, AA61, AA21);
AA22 = fmnaOp (tmp, AA62, AA22);
AA23 = fmnaOp (tmp, AA63, AA23);
AA24 = fmnaOp (tmp, AA64, AA24);
AA25 = fmnaOp (tmp, AA65, AA25);
AA26 = mulOp (negOp(tmp), AA66);
tmp = AA36;
AA30 = fmnaOp (tmp, AA60, AA30);
AA31 = fmnaOp (tmp, AA61, AA31);
AA32 = fmnaOp (tmp, AA62, AA32);
AA33 = fmnaOp (tmp, AA63, AA33);
AA34 = fmnaOp (tmp, AA64, AA34);
AA35 = fmnaOp (tmp, AA65, AA35);
AA36 = mulOp (negOp(tmp), AA66);
tmp = AA46;
AA40 = fmnaOp (tmp, AA60, AA40);
AA41 = fmnaOp (tmp, AA61, AA41);
AA42 = fmnaOp (tmp, AA62, AA42);
AA43 = fmnaOp (tmp, AA63, AA43);
AA44 = fmnaOp (tmp, AA64, AA44);
AA45 = fmnaOp (tmp, AA65, AA45);
AA46 = mulOp (negOp(tmp), AA66);
tmp = AA56;
AA50 = fmnaOp (tmp, AA60, AA50);
AA51 = fmnaOp (tmp, AA61, AA51);
AA52 = fmnaOp (tmp, AA62, AA52);
AA53 = fmnaOp (tmp, AA63, AA53);
AA54 = fmnaOp (tmp, AA64, AA54);
AA55 = fmnaOp (tmp, AA65, AA55);
AA56 = mulOp (negOp(tmp), AA66);
/* sort columns into the correct order */
Ainv(0,icol0) = AA00;
Ainv(1,icol0) = AA10;
Ainv(2,icol0) = AA20;
Ainv(3,icol0) = AA30;
Ainv(4,icol0) = AA40;
Ainv(5,icol0) = AA50;
Ainv(6,icol0) = AA60;
Ainv(0,icol1) = AA01;
Ainv(1,icol1) = AA11;
Ainv(2,icol1) = AA21;
Ainv(3,icol1) = AA31;
Ainv(4,icol1) = AA41;
Ainv(5,icol1) = AA51;
Ainv(6,icol1) = AA61;
Ainv(0,icol2) = AA02;
Ainv(1,icol2) = AA12;
Ainv(2,icol2) = AA22;
Ainv(3,icol2) = AA32;
Ainv(4,icol2) = AA42;
Ainv(5,icol2) = AA52;
Ainv(6,icol2) = AA62;
Ainv(0,icol3) = AA03;
Ainv(1,icol3) = AA13;
Ainv(2,icol3) = AA23;
Ainv(3,icol3) = AA33;
Ainv(4,icol3) = AA43;
Ainv(5,icol3) = AA53;
Ainv(6,icol3) = AA63;
Ainv(0,icol4) = AA04;
Ainv(1,icol4) = AA14;
Ainv(2,icol4) = AA24;
Ainv(3,icol4) = AA34;
Ainv(4,icol4) = AA44;
Ainv(5,icol4) = AA54;
Ainv(6,icol4) = AA64;
Ainv(0,icol5) = AA05;
Ainv(1,icol5) = AA15;
Ainv(2,icol5) = AA25;
Ainv(3,icol5) = AA35;
Ainv(4,icol5) = AA45;
Ainv(5,icol5) = AA55;
Ainv(6,icol5) = AA65;
Ainv(0,icol6) = AA06;
Ainv(1,icol6) = AA16;
Ainv(2,icol6) = AA26;
Ainv(3,icol6) = AA36;
Ainv(4,icol6) = AA46;
Ainv(5,icol6) = AA56;
Ainv(6,icol6) = AA66;
}
}
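/*
 * As in the 7x7 kernel above, matinv_8x8_matrix_per_thread inverts one matrix
 * per thread: the 8x8 input is loaded column-major (AAij = A[j*8 + i]) into
 * local variables the compiler can keep in registers, eight Gauss-Jordan
 * iterations are fully unrolled, partial pivoting (when USE_PIVOTING is
 * enabled) is tracked in perm0..perm7, and the result columns are finally
 * scattered into Ainv through the recorded permutation.
 */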
template<typename T, int arch>
__global__ void matinv_8x8_matrix_per_thread (const T *A, T *Ainv, int batch)
{
const int blkNum = blockIdx.y * gridDim.x + blockIdx.x;
const int thrdNum = blkNum * blockDim.x + threadIdx.x;
const int N = 8;
int perm0, perm1, perm2, perm3, perm4, perm5, perm6, perm7;
int icol0, icol1, icol2, icol3, icol4, icol5, icol6, icol7;
T AA00, AA01, AA02, AA03, AA04, AA05, AA06, AA07;
T AA10, AA11, AA12, AA13, AA14, AA15, AA16, AA17;
T AA20, AA21, AA22, AA23, AA24, AA25, AA26, AA27;
T AA30, AA31, AA32, AA33, AA34, AA35, AA36, AA37;
T AA40, AA41, AA42, AA43, AA44, AA45, AA46, AA47;
T AA50, AA51, AA52, AA53, AA54, AA55, AA56, AA57;
T AA60, AA61, AA62, AA63, AA64, AA65, AA66, AA67;
T AA70, AA71, AA72, AA73, AA74, AA75, AA76, AA77;
T tmp;
#if USE_PIVOTING
typename config<T,arch>::absValType t;
typename config<T,arch>::absValType p;
int i, pvt;
#endif
A += thrdNum * N * N;
Ainv += thrdNum * N * N;
if (thrdNum < batch) {
AA00 = A[0];
AA10 = A[1];
AA20 = A[2];
AA30 = A[3];
AA40 = A[4];
AA50 = A[5];
AA60 = A[6];
AA70 = A[7];
AA01 = A[8];
AA11 = A[9];
AA21 = A[10];
AA31 = A[11];
AA41 = A[12];
AA51 = A[13];
AA61 = A[14];
AA71 = A[15];
AA02 = A[16];
AA12 = A[17];
AA22 = A[18];
AA32 = A[19];
AA42 = A[20];
AA52 = A[21];
AA62 = A[22];
AA72 = A[23];
AA03 = A[24];
AA13 = A[25];
AA23 = A[26];
AA33 = A[27];
AA43 = A[28];
AA53 = A[29];
AA63 = A[30];
AA73 = A[31];
AA04 = A[32];
AA14 = A[33];
AA24 = A[34];
AA34 = A[35];
AA44 = A[36];
AA54 = A[37];
AA64 = A[38];
AA74 = A[39];
AA05 = A[40];
AA15 = A[41];
AA25 = A[42];
AA35 = A[43];
AA45 = A[44];
AA55 = A[45];
AA65 = A[46];
AA75 = A[47];
AA06 = A[48];
AA16 = A[49];
AA26 = A[50];
AA36 = A[51];
AA46 = A[52];
AA56 = A[53];
AA66 = A[54];
AA76 = A[55];
AA07 = A[56];
AA17 = A[57];
AA27 = A[58];
AA37 = A[59];
AA47 = A[60];
AA57 = A[61];
AA67 = A[62];
AA77 = A[63];
perm0 = 0;
perm1 = 1;
perm2 = 2;
perm3 = 3;
perm4 = 4;
perm5 = 5;
perm6 = 6;
perm7 = 7;
/****************** iteration 0 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA00);
pvt = 0;
t = absOp (AA10);
if (t > p) { p = t; pvt = 1; }
t = absOp (AA20);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA30);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA40);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA50);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA60);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA70);
if (t > p) { p = t; pvt = 7; }
/* swap pivot row with row 0 */
if (pvt == 1) {
tmp = AA00; AA00 = AA10; AA10 = tmp;
tmp = AA01; AA01 = AA11; AA11 = tmp;
tmp = AA02; AA02 = AA12; AA12 = tmp;
tmp = AA03; AA03 = AA13; AA13 = tmp;
tmp = AA04; AA04 = AA14; AA14 = tmp;
tmp = AA05; AA05 = AA15; AA15 = tmp;
tmp = AA06; AA06 = AA16; AA16 = tmp;
tmp = AA07; AA07 = AA17; AA17 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm1; perm1 = i;
}
if (pvt == 2) {
tmp = AA00; AA00 = AA20; AA20 = tmp;
tmp = AA01; AA01 = AA21; AA21 = tmp;
tmp = AA02; AA02 = AA22; AA22 = tmp;
tmp = AA03; AA03 = AA23; AA23 = tmp;
tmp = AA04; AA04 = AA24; AA24 = tmp;
tmp = AA05; AA05 = AA25; AA25 = tmp;
tmp = AA06; AA06 = AA26; AA26 = tmp;
tmp = AA07; AA07 = AA27; AA27 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA00; AA00 = AA30; AA30 = tmp;
tmp = AA01; AA01 = AA31; AA31 = tmp;
tmp = AA02; AA02 = AA32; AA32 = tmp;
tmp = AA03; AA03 = AA33; AA33 = tmp;
tmp = AA04; AA04 = AA34; AA34 = tmp;
tmp = AA05; AA05 = AA35; AA35 = tmp;
tmp = AA06; AA06 = AA36; AA36 = tmp;
tmp = AA07; AA07 = AA37; AA37 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA00; AA00 = AA40; AA40 = tmp;
tmp = AA01; AA01 = AA41; AA41 = tmp;
tmp = AA02; AA02 = AA42; AA42 = tmp;
tmp = AA03; AA03 = AA43; AA43 = tmp;
tmp = AA04; AA04 = AA44; AA44 = tmp;
tmp = AA05; AA05 = AA45; AA45 = tmp;
tmp = AA06; AA06 = AA46; AA46 = tmp;
tmp = AA07; AA07 = AA47; AA47 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA00; AA00 = AA50; AA50 = tmp;
tmp = AA01; AA01 = AA51; AA51 = tmp;
tmp = AA02; AA02 = AA52; AA52 = tmp;
tmp = AA03; AA03 = AA53; AA53 = tmp;
tmp = AA04; AA04 = AA54; AA54 = tmp;
tmp = AA05; AA05 = AA55; AA55 = tmp;
tmp = AA06; AA06 = AA56; AA56 = tmp;
tmp = AA07; AA07 = AA57; AA57 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA00; AA00 = AA60; AA60 = tmp;
tmp = AA01; AA01 = AA61; AA61 = tmp;
tmp = AA02; AA02 = AA62; AA62 = tmp;
tmp = AA03; AA03 = AA63; AA63 = tmp;
tmp = AA04; AA04 = AA64; AA64 = tmp;
tmp = AA05; AA05 = AA65; AA65 = tmp;
tmp = AA06; AA06 = AA66; AA66 = tmp;
tmp = AA07; AA07 = AA67; AA67 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA00; AA00 = AA70; AA70 = tmp;
tmp = AA01; AA01 = AA71; AA71 = tmp;
tmp = AA02; AA02 = AA72; AA72 = tmp;
tmp = AA03; AA03 = AA73; AA73 = tmp;
tmp = AA04; AA04 = AA74; AA74 = tmp;
tmp = AA05; AA05 = AA75; AA75 = tmp;
tmp = AA06; AA06 = AA76; AA76 = tmp;
tmp = AA07; AA07 = AA77; AA77 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm7; perm7 = i;
}
#endif // USE_PIVOTING
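        /*
         * Each unrolled iteration k performs one Gauss-Jordan step entirely
         * in registers: the pivot row is scaled by 1/AAkk (the diagonal slot
         * keeps the reciprocal itself), and every other row r is updated so
         * that its entries become AAr_c - AAr_k * AAk_c, while its
         * pivot-column entry becomes -AAr_k / AAkk. Assuming fmnaOp(a,b,c)
         * evaluates c - a*b, as its use here indicates, this accumulates the
         * inverse in place with no extra storage. Iteration 0 below shows the
         * pattern; iterations 1 through 7 repeat it for the later pivots.
         */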
/* scale current row */
tmp = rcpOp (AA00);
icol0 = perm0;
AA00 = tmp;
AA01 = mulOp (tmp, AA01);
AA02 = mulOp (tmp, AA02);
AA03 = mulOp (tmp, AA03);
AA04 = mulOp (tmp, AA04);
AA05 = mulOp (tmp, AA05);
AA06 = mulOp (tmp, AA06);
AA07 = mulOp (tmp, AA07);
/* eliminate above and below current row */
tmp = AA10;
AA10 = mulOp (negOp(tmp), AA00);
AA11 = fmnaOp (tmp, AA01, AA11);
AA12 = fmnaOp (tmp, AA02, AA12);
AA13 = fmnaOp (tmp, AA03, AA13);
AA14 = fmnaOp (tmp, AA04, AA14);
AA15 = fmnaOp (tmp, AA05, AA15);
AA16 = fmnaOp (tmp, AA06, AA16);
AA17 = fmnaOp (tmp, AA07, AA17);
tmp = AA20;
AA20 = mulOp (negOp(tmp), AA00);
AA21 = fmnaOp (tmp, AA01, AA21);
AA22 = fmnaOp (tmp, AA02, AA22);
AA23 = fmnaOp (tmp, AA03, AA23);
AA24 = fmnaOp (tmp, AA04, AA24);
AA25 = fmnaOp (tmp, AA05, AA25);
AA26 = fmnaOp (tmp, AA06, AA26);
AA27 = fmnaOp (tmp, AA07, AA27);
tmp = AA30;
AA30 = mulOp (negOp(tmp), AA00);
AA31 = fmnaOp (tmp, AA01, AA31);
AA32 = fmnaOp (tmp, AA02, AA32);
AA33 = fmnaOp (tmp, AA03, AA33);
AA34 = fmnaOp (tmp, AA04, AA34);
AA35 = fmnaOp (tmp, AA05, AA35);
AA36 = fmnaOp (tmp, AA06, AA36);
AA37 = fmnaOp (tmp, AA07, AA37);
tmp = AA40;
AA40 = mulOp (negOp(tmp), AA00);
AA41 = fmnaOp (tmp, AA01, AA41);
AA42 = fmnaOp (tmp, AA02, AA42);
AA43 = fmnaOp (tmp, AA03, AA43);
AA44 = fmnaOp (tmp, AA04, AA44);
AA45 = fmnaOp (tmp, AA05, AA45);
AA46 = fmnaOp (tmp, AA06, AA46);
AA47 = fmnaOp (tmp, AA07, AA47);
tmp = AA50;
AA50 = mulOp (negOp(tmp), AA00);
AA51 = fmnaOp (tmp, AA01, AA51);
AA52 = fmnaOp (tmp, AA02, AA52);
AA53 = fmnaOp (tmp, AA03, AA53);
AA54 = fmnaOp (tmp, AA04, AA54);
AA55 = fmnaOp (tmp, AA05, AA55);
AA56 = fmnaOp (tmp, AA06, AA56);
AA57 = fmnaOp (tmp, AA07, AA57);
tmp = AA60;
AA60 = mulOp (negOp(tmp), AA00);
AA61 = fmnaOp (tmp, AA01, AA61);
AA62 = fmnaOp (tmp, AA02, AA62);
AA63 = fmnaOp (tmp, AA03, AA63);
AA64 = fmnaOp (tmp, AA04, AA64);
AA65 = fmnaOp (tmp, AA05, AA65);
AA66 = fmnaOp (tmp, AA06, AA66);
AA67 = fmnaOp (tmp, AA07, AA67);
tmp = AA70;
AA70 = mulOp (negOp(tmp), AA00);
AA71 = fmnaOp (tmp, AA01, AA71);
AA72 = fmnaOp (tmp, AA02, AA72);
AA73 = fmnaOp (tmp, AA03, AA73);
AA74 = fmnaOp (tmp, AA04, AA74);
AA75 = fmnaOp (tmp, AA05, AA75);
AA76 = fmnaOp (tmp, AA06, AA76);
AA77 = fmnaOp (tmp, AA07, AA77);
/****************** iteration 1 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA11);
pvt = 1;
t = absOp (AA21);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA31);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA41);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA51);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA61);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA71);
if (t > p) { p = t; pvt = 7; }
/* swap pivot row with row 1 */
if (pvt == 2) {
tmp = AA10; AA10 = AA20; AA20 = tmp;
tmp = AA11; AA11 = AA21; AA21 = tmp;
tmp = AA12; AA12 = AA22; AA22 = tmp;
tmp = AA13; AA13 = AA23; AA23 = tmp;
tmp = AA14; AA14 = AA24; AA24 = tmp;
tmp = AA15; AA15 = AA25; AA25 = tmp;
tmp = AA16; AA16 = AA26; AA26 = tmp;
tmp = AA17; AA17 = AA27; AA27 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA10; AA10 = AA30; AA30 = tmp;
tmp = AA11; AA11 = AA31; AA31 = tmp;
tmp = AA12; AA12 = AA32; AA32 = tmp;
tmp = AA13; AA13 = AA33; AA33 = tmp;
tmp = AA14; AA14 = AA34; AA34 = tmp;
tmp = AA15; AA15 = AA35; AA35 = tmp;
tmp = AA16; AA16 = AA36; AA36 = tmp;
tmp = AA17; AA17 = AA37; AA37 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA10; AA10 = AA40; AA40 = tmp;
tmp = AA11; AA11 = AA41; AA41 = tmp;
tmp = AA12; AA12 = AA42; AA42 = tmp;
tmp = AA13; AA13 = AA43; AA43 = tmp;
tmp = AA14; AA14 = AA44; AA44 = tmp;
tmp = AA15; AA15 = AA45; AA45 = tmp;
tmp = AA16; AA16 = AA46; AA46 = tmp;
tmp = AA17; AA17 = AA47; AA47 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA10; AA10 = AA50; AA50 = tmp;
tmp = AA11; AA11 = AA51; AA51 = tmp;
tmp = AA12; AA12 = AA52; AA52 = tmp;
tmp = AA13; AA13 = AA53; AA53 = tmp;
tmp = AA14; AA14 = AA54; AA54 = tmp;
tmp = AA15; AA15 = AA55; AA55 = tmp;
tmp = AA16; AA16 = AA56; AA56 = tmp;
tmp = AA17; AA17 = AA57; AA57 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA10; AA10 = AA60; AA60 = tmp;
tmp = AA11; AA11 = AA61; AA61 = tmp;
tmp = AA12; AA12 = AA62; AA62 = tmp;
tmp = AA13; AA13 = AA63; AA63 = tmp;
tmp = AA14; AA14 = AA64; AA64 = tmp;
tmp = AA15; AA15 = AA65; AA65 = tmp;
tmp = AA16; AA16 = AA66; AA66 = tmp;
tmp = AA17; AA17 = AA67; AA67 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA10; AA10 = AA70; AA70 = tmp;
tmp = AA11; AA11 = AA71; AA71 = tmp;
tmp = AA12; AA12 = AA72; AA72 = tmp;
tmp = AA13; AA13 = AA73; AA73 = tmp;
tmp = AA14; AA14 = AA74; AA74 = tmp;
tmp = AA15; AA15 = AA75; AA75 = tmp;
tmp = AA16; AA16 = AA76; AA76 = tmp;
tmp = AA17; AA17 = AA77; AA77 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm7; perm7 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA11);
icol1 = perm1;
AA10 = mulOp (tmp, AA10);
AA11 = tmp;
AA12 = mulOp (tmp, AA12);
AA13 = mulOp (tmp, AA13);
AA14 = mulOp (tmp, AA14);
AA15 = mulOp (tmp, AA15);
AA16 = mulOp (tmp, AA16);
AA17 = mulOp (tmp, AA17);
/* eliminate above and below current row */
tmp = AA01;
AA00 = fmnaOp (tmp, AA10, AA00);
AA01 = mulOp (negOp(tmp), AA11);
AA02 = fmnaOp (tmp, AA12, AA02);
AA03 = fmnaOp (tmp, AA13, AA03);
AA04 = fmnaOp (tmp, AA14, AA04);
AA05 = fmnaOp (tmp, AA15, AA05);
AA06 = fmnaOp (tmp, AA16, AA06);
AA07 = fmnaOp (tmp, AA17, AA07);
tmp = AA21;
AA20 = fmnaOp (tmp, AA10, AA20);
AA21 = mulOp (negOp(tmp), AA11);
AA22 = fmnaOp (tmp, AA12, AA22);
AA23 = fmnaOp (tmp, AA13, AA23);
AA24 = fmnaOp (tmp, AA14, AA24);
AA25 = fmnaOp (tmp, AA15, AA25);
AA26 = fmnaOp (tmp, AA16, AA26);
AA27 = fmnaOp (tmp, AA17, AA27);
tmp = AA31;
AA30 = fmnaOp (tmp, AA10, AA30);
AA31 = mulOp (negOp(tmp), AA11);
AA32 = fmnaOp (tmp, AA12, AA32);
AA33 = fmnaOp (tmp, AA13, AA33);
AA34 = fmnaOp (tmp, AA14, AA34);
AA35 = fmnaOp (tmp, AA15, AA35);
AA36 = fmnaOp (tmp, AA16, AA36);
AA37 = fmnaOp (tmp, AA17, AA37);
tmp = AA41;
AA40 = fmnaOp (tmp, AA10, AA40);
AA41 = mulOp (negOp(tmp), AA11);
AA42 = fmnaOp (tmp, AA12, AA42);
AA43 = fmnaOp (tmp, AA13, AA43);
AA44 = fmnaOp (tmp, AA14, AA44);
AA45 = fmnaOp (tmp, AA15, AA45);
AA46 = fmnaOp (tmp, AA16, AA46);
AA47 = fmnaOp (tmp, AA17, AA47);
tmp = AA51;
AA50 = fmnaOp (tmp, AA10, AA50);
AA51 = mulOp (negOp(tmp), AA11);
AA52 = fmnaOp (tmp, AA12, AA52);
AA53 = fmnaOp (tmp, AA13, AA53);
AA54 = fmnaOp (tmp, AA14, AA54);
AA55 = fmnaOp (tmp, AA15, AA55);
AA56 = fmnaOp (tmp, AA16, AA56);
AA57 = fmnaOp (tmp, AA17, AA57);
tmp = AA61;
AA60 = fmnaOp (tmp, AA10, AA60);
AA61 = mulOp (negOp(tmp), AA11);
AA62 = fmnaOp (tmp, AA12, AA62);
AA63 = fmnaOp (tmp, AA13, AA63);
AA64 = fmnaOp (tmp, AA14, AA64);
AA65 = fmnaOp (tmp, AA15, AA65);
AA66 = fmnaOp (tmp, AA16, AA66);
AA67 = fmnaOp (tmp, AA17, AA67);
tmp = AA71;
AA70 = fmnaOp (tmp, AA10, AA70);
AA71 = mulOp (negOp(tmp), AA11);
AA72 = fmnaOp (tmp, AA12, AA72);
AA73 = fmnaOp (tmp, AA13, AA73);
AA74 = fmnaOp (tmp, AA14, AA74);
AA75 = fmnaOp (tmp, AA15, AA75);
AA76 = fmnaOp (tmp, AA16, AA76);
AA77 = fmnaOp (tmp, AA17, AA77);
/****************** iteration 2 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA22);
pvt = 2;
t = absOp (AA32);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA42);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA52);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA62);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA72);
if (t > p) { p = t; pvt = 7; }
/* swap pivot row with row 2 */
if (pvt == 3) {
tmp = AA20; AA20 = AA30; AA30 = tmp;
tmp = AA21; AA21 = AA31; AA31 = tmp;
tmp = AA22; AA22 = AA32; AA32 = tmp;
tmp = AA23; AA23 = AA33; AA33 = tmp;
tmp = AA24; AA24 = AA34; AA34 = tmp;
tmp = AA25; AA25 = AA35; AA35 = tmp;
tmp = AA26; AA26 = AA36; AA36 = tmp;
tmp = AA27; AA27 = AA37; AA37 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA20; AA20 = AA40; AA40 = tmp;
tmp = AA21; AA21 = AA41; AA41 = tmp;
tmp = AA22; AA22 = AA42; AA42 = tmp;
tmp = AA23; AA23 = AA43; AA43 = tmp;
tmp = AA24; AA24 = AA44; AA44 = tmp;
tmp = AA25; AA25 = AA45; AA45 = tmp;
tmp = AA26; AA26 = AA46; AA46 = tmp;
tmp = AA27; AA27 = AA47; AA47 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA20; AA20 = AA50; AA50 = tmp;
tmp = AA21; AA21 = AA51; AA51 = tmp;
tmp = AA22; AA22 = AA52; AA52 = tmp;
tmp = AA23; AA23 = AA53; AA53 = tmp;
tmp = AA24; AA24 = AA54; AA54 = tmp;
tmp = AA25; AA25 = AA55; AA55 = tmp;
tmp = AA26; AA26 = AA56; AA56 = tmp;
tmp = AA27; AA27 = AA57; AA57 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA20; AA20 = AA60; AA60 = tmp;
tmp = AA21; AA21 = AA61; AA61 = tmp;
tmp = AA22; AA22 = AA62; AA62 = tmp;
tmp = AA23; AA23 = AA63; AA63 = tmp;
tmp = AA24; AA24 = AA64; AA64 = tmp;
tmp = AA25; AA25 = AA65; AA65 = tmp;
tmp = AA26; AA26 = AA66; AA66 = tmp;
tmp = AA27; AA27 = AA67; AA67 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA20; AA20 = AA70; AA70 = tmp;
tmp = AA21; AA21 = AA71; AA71 = tmp;
tmp = AA22; AA22 = AA72; AA72 = tmp;
tmp = AA23; AA23 = AA73; AA73 = tmp;
tmp = AA24; AA24 = AA74; AA74 = tmp;
tmp = AA25; AA25 = AA75; AA75 = tmp;
tmp = AA26; AA26 = AA76; AA76 = tmp;
tmp = AA27; AA27 = AA77; AA77 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm7; perm7 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA22);
icol2 = perm2;
AA20 = mulOp (tmp, AA20);
AA21 = mulOp (tmp, AA21);
AA22 = tmp;
AA23 = mulOp (tmp, AA23);
AA24 = mulOp (tmp, AA24);
AA25 = mulOp (tmp, AA25);
AA26 = mulOp (tmp, AA26);
AA27 = mulOp (tmp, AA27);
/* eliminate above and below current row */
tmp = AA02;
AA00 = fmnaOp (tmp, AA20, AA00);
AA01 = fmnaOp (tmp, AA21, AA01);
AA02 = mulOp (negOp(tmp), AA22);
AA03 = fmnaOp (tmp, AA23, AA03);
AA04 = fmnaOp (tmp, AA24, AA04);
AA05 = fmnaOp (tmp, AA25, AA05);
AA06 = fmnaOp (tmp, AA26, AA06);
AA07 = fmnaOp (tmp, AA27, AA07);
tmp = AA12;
AA10 = fmnaOp (tmp, AA20, AA10);
AA11 = fmnaOp (tmp, AA21, AA11);
AA12 = mulOp (negOp(tmp), AA22);
AA13 = fmnaOp (tmp, AA23, AA13);
AA14 = fmnaOp (tmp, AA24, AA14);
AA15 = fmnaOp (tmp, AA25, AA15);
AA16 = fmnaOp (tmp, AA26, AA16);
AA17 = fmnaOp (tmp, AA27, AA17);
tmp = AA32;
AA30 = fmnaOp (tmp, AA20, AA30);
AA31 = fmnaOp (tmp, AA21, AA31);
AA32 = mulOp (negOp(tmp), AA22);
AA33 = fmnaOp (tmp, AA23, AA33);
AA34 = fmnaOp (tmp, AA24, AA34);
AA35 = fmnaOp (tmp, AA25, AA35);
AA36 = fmnaOp (tmp, AA26, AA36);
AA37 = fmnaOp (tmp, AA27, AA37);
tmp = AA42;
AA40 = fmnaOp (tmp, AA20, AA40);
AA41 = fmnaOp (tmp, AA21, AA41);
AA42 = mulOp (negOp(tmp), AA22);
AA43 = fmnaOp (tmp, AA23, AA43);
AA44 = fmnaOp (tmp, AA24, AA44);
AA45 = fmnaOp (tmp, AA25, AA45);
AA46 = fmnaOp (tmp, AA26, AA46);
AA47 = fmnaOp (tmp, AA27, AA47);
tmp = AA52;
AA50 = fmnaOp (tmp, AA20, AA50);
AA51 = fmnaOp (tmp, AA21, AA51);
AA52 = mulOp (negOp(tmp), AA22);
AA53 = fmnaOp (tmp, AA23, AA53);
AA54 = fmnaOp (tmp, AA24, AA54);
AA55 = fmnaOp (tmp, AA25, AA55);
AA56 = fmnaOp (tmp, AA26, AA56);
AA57 = fmnaOp (tmp, AA27, AA57);
tmp = AA62;
AA60 = fmnaOp (tmp, AA20, AA60);
AA61 = fmnaOp (tmp, AA21, AA61);
AA62 = mulOp (negOp(tmp), AA22);
AA63 = fmnaOp (tmp, AA23, AA63);
AA64 = fmnaOp (tmp, AA24, AA64);
AA65 = fmnaOp (tmp, AA25, AA65);
AA66 = fmnaOp (tmp, AA26, AA66);
AA67 = fmnaOp (tmp, AA27, AA67);
tmp = AA72;
AA70 = fmnaOp (tmp, AA20, AA70);
AA71 = fmnaOp (tmp, AA21, AA71);
AA72 = mulOp (negOp(tmp), AA22);
AA73 = fmnaOp (tmp, AA23, AA73);
AA74 = fmnaOp (tmp, AA24, AA74);
AA75 = fmnaOp (tmp, AA25, AA75);
AA76 = fmnaOp (tmp, AA26, AA76);
AA77 = fmnaOp (tmp, AA27, AA77);
/****************** iteration 3 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA33);
pvt = 3;
t = absOp (AA43);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA53);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA63);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA73);
if (t > p) { p = t; pvt = 7; }
/* swap pivot row with row 3 */
if (pvt == 4) {
tmp = AA30; AA30 = AA40; AA40 = tmp;
tmp = AA31; AA31 = AA41; AA41 = tmp;
tmp = AA32; AA32 = AA42; AA42 = tmp;
tmp = AA33; AA33 = AA43; AA43 = tmp;
tmp = AA34; AA34 = AA44; AA44 = tmp;
tmp = AA35; AA35 = AA45; AA45 = tmp;
tmp = AA36; AA36 = AA46; AA46 = tmp;
tmp = AA37; AA37 = AA47; AA47 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA30; AA30 = AA50; AA50 = tmp;
tmp = AA31; AA31 = AA51; AA51 = tmp;
tmp = AA32; AA32 = AA52; AA52 = tmp;
tmp = AA33; AA33 = AA53; AA53 = tmp;
tmp = AA34; AA34 = AA54; AA54 = tmp;
tmp = AA35; AA35 = AA55; AA55 = tmp;
tmp = AA36; AA36 = AA56; AA56 = tmp;
tmp = AA37; AA37 = AA57; AA57 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA30; AA30 = AA60; AA60 = tmp;
tmp = AA31; AA31 = AA61; AA61 = tmp;
tmp = AA32; AA32 = AA62; AA62 = tmp;
tmp = AA33; AA33 = AA63; AA63 = tmp;
tmp = AA34; AA34 = AA64; AA64 = tmp;
tmp = AA35; AA35 = AA65; AA65 = tmp;
tmp = AA36; AA36 = AA66; AA66 = tmp;
tmp = AA37; AA37 = AA67; AA67 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA30; AA30 = AA70; AA70 = tmp;
tmp = AA31; AA31 = AA71; AA71 = tmp;
tmp = AA32; AA32 = AA72; AA72 = tmp;
tmp = AA33; AA33 = AA73; AA73 = tmp;
tmp = AA34; AA34 = AA74; AA74 = tmp;
tmp = AA35; AA35 = AA75; AA75 = tmp;
tmp = AA36; AA36 = AA76; AA76 = tmp;
tmp = AA37; AA37 = AA77; AA77 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm7; perm7 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA33);
icol3 = perm3;
AA30 = mulOp (tmp, AA30);
AA31 = mulOp (tmp, AA31);
AA32 = mulOp (tmp, AA32);
AA33 = tmp;
AA34 = mulOp (tmp, AA34);
AA35 = mulOp (tmp, AA35);
AA36 = mulOp (tmp, AA36);
AA37 = mulOp (tmp, AA37);
/* eliminate above and below current row */
tmp = AA03;
AA00 = fmnaOp (tmp, AA30, AA00);
AA01 = fmnaOp (tmp, AA31, AA01);
AA02 = fmnaOp (tmp, AA32, AA02);
AA03 = mulOp (negOp(tmp), AA33);
AA04 = fmnaOp (tmp, AA34, AA04);
AA05 = fmnaOp (tmp, AA35, AA05);
AA06 = fmnaOp (tmp, AA36, AA06);
AA07 = fmnaOp (tmp, AA37, AA07);
tmp = AA13;
AA10 = fmnaOp (tmp, AA30, AA10);
AA11 = fmnaOp (tmp, AA31, AA11);
AA12 = fmnaOp (tmp, AA32, AA12);
AA13 = mulOp (negOp(tmp), AA33);
AA14 = fmnaOp (tmp, AA34, AA14);
AA15 = fmnaOp (tmp, AA35, AA15);
AA16 = fmnaOp (tmp, AA36, AA16);
AA17 = fmnaOp (tmp, AA37, AA17);
tmp = AA23;
AA20 = fmnaOp (tmp, AA30, AA20);
AA21 = fmnaOp (tmp, AA31, AA21);
AA22 = fmnaOp (tmp, AA32, AA22);
AA23 = mulOp (negOp(tmp), AA33);
AA24 = fmnaOp (tmp, AA34, AA24);
AA25 = fmnaOp (tmp, AA35, AA25);
AA26 = fmnaOp (tmp, AA36, AA26);
AA27 = fmnaOp (tmp, AA37, AA27);
tmp = AA43;
AA40 = fmnaOp (tmp, AA30, AA40);
AA41 = fmnaOp (tmp, AA31, AA41);
AA42 = fmnaOp (tmp, AA32, AA42);
AA43 = mulOp (negOp(tmp), AA33);
AA44 = fmnaOp (tmp, AA34, AA44);
AA45 = fmnaOp (tmp, AA35, AA45);
AA46 = fmnaOp (tmp, AA36, AA46);
AA47 = fmnaOp (tmp, AA37, AA47);
tmp = AA53;
AA50 = fmnaOp (tmp, AA30, AA50);
AA51 = fmnaOp (tmp, AA31, AA51);
AA52 = fmnaOp (tmp, AA32, AA52);
AA53 = mulOp (negOp(tmp), AA33);
AA54 = fmnaOp (tmp, AA34, AA54);
AA55 = fmnaOp (tmp, AA35, AA55);
AA56 = fmnaOp (tmp, AA36, AA56);
AA57 = fmnaOp (tmp, AA37, AA57);
tmp = AA63;
AA60 = fmnaOp (tmp, AA30, AA60);
AA61 = fmnaOp (tmp, AA31, AA61);
AA62 = fmnaOp (tmp, AA32, AA62);
AA63 = mulOp (negOp(tmp), AA33);
AA64 = fmnaOp (tmp, AA34, AA64);
AA65 = fmnaOp (tmp, AA35, AA65);
AA66 = fmnaOp (tmp, AA36, AA66);
AA67 = fmnaOp (tmp, AA37, AA67);
tmp = AA73;
AA70 = fmnaOp (tmp, AA30, AA70);
AA71 = fmnaOp (tmp, AA31, AA71);
AA72 = fmnaOp (tmp, AA32, AA72);
AA73 = mulOp (negOp(tmp), AA33);
AA74 = fmnaOp (tmp, AA34, AA74);
AA75 = fmnaOp (tmp, AA35, AA75);
AA76 = fmnaOp (tmp, AA36, AA76);
AA77 = fmnaOp (tmp, AA37, AA77);
/****************** iteration 4 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA44);
pvt = 4;
t = absOp (AA54);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA64);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA74);
if (t > p) { p = t; pvt = 7; }
/* swap pivot row with row 4 */
if (pvt == 5) {
tmp = AA40; AA40 = AA50; AA50 = tmp;
tmp = AA41; AA41 = AA51; AA51 = tmp;
tmp = AA42; AA42 = AA52; AA52 = tmp;
tmp = AA43; AA43 = AA53; AA53 = tmp;
tmp = AA44; AA44 = AA54; AA54 = tmp;
tmp = AA45; AA45 = AA55; AA55 = tmp;
tmp = AA46; AA46 = AA56; AA56 = tmp;
tmp = AA47; AA47 = AA57; AA57 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA40; AA40 = AA60; AA60 = tmp;
tmp = AA41; AA41 = AA61; AA61 = tmp;
tmp = AA42; AA42 = AA62; AA62 = tmp;
tmp = AA43; AA43 = AA63; AA63 = tmp;
tmp = AA44; AA44 = AA64; AA64 = tmp;
tmp = AA45; AA45 = AA65; AA65 = tmp;
tmp = AA46; AA46 = AA66; AA66 = tmp;
tmp = AA47; AA47 = AA67; AA67 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA40; AA40 = AA70; AA70 = tmp;
tmp = AA41; AA41 = AA71; AA71 = tmp;
tmp = AA42; AA42 = AA72; AA72 = tmp;
tmp = AA43; AA43 = AA73; AA73 = tmp;
tmp = AA44; AA44 = AA74; AA74 = tmp;
tmp = AA45; AA45 = AA75; AA75 = tmp;
tmp = AA46; AA46 = AA76; AA76 = tmp;
tmp = AA47; AA47 = AA77; AA77 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm7; perm7 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA44);
icol4 = perm4;
AA40 = mulOp (tmp, AA40);
AA41 = mulOp (tmp, AA41);
AA42 = mulOp (tmp, AA42);
AA43 = mulOp (tmp, AA43);
AA44 = tmp;
AA45 = mulOp (tmp, AA45);
AA46 = mulOp (tmp, AA46);
AA47 = mulOp (tmp, AA47);
/* eliminate above and below current row */
tmp = AA04;
AA00 = fmnaOp (tmp, AA40, AA00);
AA01 = fmnaOp (tmp, AA41, AA01);
AA02 = fmnaOp (tmp, AA42, AA02);
AA03 = fmnaOp (tmp, AA43, AA03);
AA04 = mulOp (negOp(tmp), AA44);
AA05 = fmnaOp (tmp, AA45, AA05);
AA06 = fmnaOp (tmp, AA46, AA06);
AA07 = fmnaOp (tmp, AA47, AA07);
tmp = AA14;
AA10 = fmnaOp (tmp, AA40, AA10);
AA11 = fmnaOp (tmp, AA41, AA11);
AA12 = fmnaOp (tmp, AA42, AA12);
AA13 = fmnaOp (tmp, AA43, AA13);
AA14 = mulOp (negOp(tmp), AA44);
AA15 = fmnaOp (tmp, AA45, AA15);
AA16 = fmnaOp (tmp, AA46, AA16);
AA17 = fmnaOp (tmp, AA47, AA17);
tmp = AA24;
AA20 = fmnaOp (tmp, AA40, AA20);
AA21 = fmnaOp (tmp, AA41, AA21);
AA22 = fmnaOp (tmp, AA42, AA22);
AA23 = fmnaOp (tmp, AA43, AA23);
AA24 = mulOp (negOp(tmp), AA44);
AA25 = fmnaOp (tmp, AA45, AA25);
AA26 = fmnaOp (tmp, AA46, AA26);
AA27 = fmnaOp (tmp, AA47, AA27);
tmp = AA34;
AA30 = fmnaOp (tmp, AA40, AA30);
AA31 = fmnaOp (tmp, AA41, AA31);
AA32 = fmnaOp (tmp, AA42, AA32);
AA33 = fmnaOp (tmp, AA43, AA33);
AA34 = mulOp (negOp(tmp), AA44);
AA35 = fmnaOp (tmp, AA45, AA35);
AA36 = fmnaOp (tmp, AA46, AA36);
AA37 = fmnaOp (tmp, AA47, AA37);
tmp = AA54;
AA50 = fmnaOp (tmp, AA40, AA50);
AA51 = fmnaOp (tmp, AA41, AA51);
AA52 = fmnaOp (tmp, AA42, AA52);
AA53 = fmnaOp (tmp, AA43, AA53);
AA54 = mulOp (negOp(tmp), AA44);
AA55 = fmnaOp (tmp, AA45, AA55);
AA56 = fmnaOp (tmp, AA46, AA56);
AA57 = fmnaOp (tmp, AA47, AA57);
tmp = AA64;
AA60 = fmnaOp (tmp, AA40, AA60);
AA61 = fmnaOp (tmp, AA41, AA61);
AA62 = fmnaOp (tmp, AA42, AA62);
AA63 = fmnaOp (tmp, AA43, AA63);
AA64 = mulOp (negOp(tmp), AA44);
AA65 = fmnaOp (tmp, AA45, AA65);
AA66 = fmnaOp (tmp, AA46, AA66);
AA67 = fmnaOp (tmp, AA47, AA67);
tmp = AA74;
AA70 = fmnaOp (tmp, AA40, AA70);
AA71 = fmnaOp (tmp, AA41, AA71);
AA72 = fmnaOp (tmp, AA42, AA72);
AA73 = fmnaOp (tmp, AA43, AA73);
AA74 = mulOp (negOp(tmp), AA44);
AA75 = fmnaOp (tmp, AA45, AA75);
AA76 = fmnaOp (tmp, AA46, AA76);
AA77 = fmnaOp (tmp, AA47, AA77);
/****************** iteration 5 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA55);
pvt = 5;
t = absOp (AA65);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA75);
if (t > p) { p = t; pvt = 7; }
/* swap pivot row with row 5 */
if (pvt == 6) {
tmp = AA50; AA50 = AA60; AA60 = tmp;
tmp = AA51; AA51 = AA61; AA61 = tmp;
tmp = AA52; AA52 = AA62; AA62 = tmp;
tmp = AA53; AA53 = AA63; AA63 = tmp;
tmp = AA54; AA54 = AA64; AA64 = tmp;
tmp = AA55; AA55 = AA65; AA65 = tmp;
tmp = AA56; AA56 = AA66; AA66 = tmp;
tmp = AA57; AA57 = AA67; AA67 = tmp;
/* update permutation vector based on row swap */
i = perm5; perm5 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA50; AA50 = AA70; AA70 = tmp;
tmp = AA51; AA51 = AA71; AA71 = tmp;
tmp = AA52; AA52 = AA72; AA72 = tmp;
tmp = AA53; AA53 = AA73; AA73 = tmp;
tmp = AA54; AA54 = AA74; AA74 = tmp;
tmp = AA55; AA55 = AA75; AA75 = tmp;
tmp = AA56; AA56 = AA76; AA76 = tmp;
tmp = AA57; AA57 = AA77; AA77 = tmp;
/* update permutation vector based on row swap */
i = perm5; perm5 = perm7; perm7 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA55);
icol5 = perm5;
AA50 = mulOp (tmp, AA50);
AA51 = mulOp (tmp, AA51);
AA52 = mulOp (tmp, AA52);
AA53 = mulOp (tmp, AA53);
AA54 = mulOp (tmp, AA54);
AA55 = tmp;
AA56 = mulOp (tmp, AA56);
AA57 = mulOp (tmp, AA57);
/* eliminate above and below current row */
tmp = AA05;
AA00 = fmnaOp (tmp, AA50, AA00);
AA01 = fmnaOp (tmp, AA51, AA01);
AA02 = fmnaOp (tmp, AA52, AA02);
AA03 = fmnaOp (tmp, AA53, AA03);
AA04 = fmnaOp (tmp, AA54, AA04);
AA05 = mulOp (negOp(tmp), AA55);
AA06 = fmnaOp (tmp, AA56, AA06);
AA07 = fmnaOp (tmp, AA57, AA07);
tmp = AA15;
AA10 = fmnaOp (tmp, AA50, AA10);
AA11 = fmnaOp (tmp, AA51, AA11);
AA12 = fmnaOp (tmp, AA52, AA12);
AA13 = fmnaOp (tmp, AA53, AA13);
AA14 = fmnaOp (tmp, AA54, AA14);
AA15 = mulOp (negOp(tmp), AA55);
AA16 = fmnaOp (tmp, AA56, AA16);
AA17 = fmnaOp (tmp, AA57, AA17);
tmp = AA25;
AA20 = fmnaOp (tmp, AA50, AA20);
AA21 = fmnaOp (tmp, AA51, AA21);
AA22 = fmnaOp (tmp, AA52, AA22);
AA23 = fmnaOp (tmp, AA53, AA23);
AA24 = fmnaOp (tmp, AA54, AA24);
AA25 = mulOp (negOp(tmp), AA55);
AA26 = fmnaOp (tmp, AA56, AA26);
AA27 = fmnaOp (tmp, AA57, AA27);
tmp = AA35;
AA30 = fmnaOp (tmp, AA50, AA30);
AA31 = fmnaOp (tmp, AA51, AA31);
AA32 = fmnaOp (tmp, AA52, AA32);
AA33 = fmnaOp (tmp, AA53, AA33);
AA34 = fmnaOp (tmp, AA54, AA34);
AA35 = mulOp (negOp(tmp), AA55);
AA36 = fmnaOp (tmp, AA56, AA36);
AA37 = fmnaOp (tmp, AA57, AA37);
tmp = AA45;
AA40 = fmnaOp (tmp, AA50, AA40);
AA41 = fmnaOp (tmp, AA51, AA41);
AA42 = fmnaOp (tmp, AA52, AA42);
AA43 = fmnaOp (tmp, AA53, AA43);
AA44 = fmnaOp (tmp, AA54, AA44);
AA45 = mulOp (negOp(tmp), AA55);
AA46 = fmnaOp (tmp, AA56, AA46);
AA47 = fmnaOp (tmp, AA57, AA47);
tmp = AA65;
AA60 = fmnaOp (tmp, AA50, AA60);
AA61 = fmnaOp (tmp, AA51, AA61);
AA62 = fmnaOp (tmp, AA52, AA62);
AA63 = fmnaOp (tmp, AA53, AA63);
AA64 = fmnaOp (tmp, AA54, AA64);
AA65 = mulOp (negOp(tmp), AA55);
AA66 = fmnaOp (tmp, AA56, AA66);
AA67 = fmnaOp (tmp, AA57, AA67);
tmp = AA75;
AA70 = fmnaOp (tmp, AA50, AA70);
AA71 = fmnaOp (tmp, AA51, AA71);
AA72 = fmnaOp (tmp, AA52, AA72);
AA73 = fmnaOp (tmp, AA53, AA73);
AA74 = fmnaOp (tmp, AA54, AA74);
AA75 = mulOp (negOp(tmp), AA55);
AA76 = fmnaOp (tmp, AA56, AA76);
AA77 = fmnaOp (tmp, AA57, AA77);
/****************** iteration 6 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA66);
pvt = 6;
t = absOp (AA76);
if (t > p) { p = t; pvt = 7; }
/* swap pivot row with row 6 */
if (pvt == 7) {
tmp = AA60; AA60 = AA70; AA70 = tmp;
tmp = AA61; AA61 = AA71; AA71 = tmp;
tmp = AA62; AA62 = AA72; AA72 = tmp;
tmp = AA63; AA63 = AA73; AA73 = tmp;
tmp = AA64; AA64 = AA74; AA74 = tmp;
tmp = AA65; AA65 = AA75; AA75 = tmp;
tmp = AA66; AA66 = AA76; AA76 = tmp;
tmp = AA67; AA67 = AA77; AA77 = tmp;
/* update permutation vector based on row swap */
i = perm6; perm6 = perm7; perm7 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA66);
icol6 = perm6;
AA60 = mulOp (tmp, AA60);
AA61 = mulOp (tmp, AA61);
AA62 = mulOp (tmp, AA62);
AA63 = mulOp (tmp, AA63);
AA64 = mulOp (tmp, AA64);
AA65 = mulOp (tmp, AA65);
AA66 = tmp;
AA67 = mulOp (tmp, AA67);
/* eliminate above and below current row */
tmp = AA06;
AA00 = fmnaOp (tmp, AA60, AA00);
AA01 = fmnaOp (tmp, AA61, AA01);
AA02 = fmnaOp (tmp, AA62, AA02);
AA03 = fmnaOp (tmp, AA63, AA03);
AA04 = fmnaOp (tmp, AA64, AA04);
AA05 = fmnaOp (tmp, AA65, AA05);
AA06 = mulOp (negOp(tmp), AA66);
AA07 = fmnaOp (tmp, AA67, AA07);
tmp = AA16;
AA10 = fmnaOp (tmp, AA60, AA10);
AA11 = fmnaOp (tmp, AA61, AA11);
AA12 = fmnaOp (tmp, AA62, AA12);
AA13 = fmnaOp (tmp, AA63, AA13);
AA14 = fmnaOp (tmp, AA64, AA14);
AA15 = fmnaOp (tmp, AA65, AA15);
AA16 = mulOp (negOp(tmp), AA66);
AA17 = fmnaOp (tmp, AA67, AA17);
tmp = AA26;
AA20 = fmnaOp (tmp, AA60, AA20);
AA21 = fmnaOp (tmp, AA61, AA21);
AA22 = fmnaOp (tmp, AA62, AA22);
AA23 = fmnaOp (tmp, AA63, AA23);
AA24 = fmnaOp (tmp, AA64, AA24);
AA25 = fmnaOp (tmp, AA65, AA25);
AA26 = mulOp (negOp(tmp), AA66);
AA27 = fmnaOp (tmp, AA67, AA27);
tmp = AA36;
AA30 = fmnaOp (tmp, AA60, AA30);
AA31 = fmnaOp (tmp, AA61, AA31);
AA32 = fmnaOp (tmp, AA62, AA32);
AA33 = fmnaOp (tmp, AA63, AA33);
AA34 = fmnaOp (tmp, AA64, AA34);
AA35 = fmnaOp (tmp, AA65, AA35);
AA36 = mulOp (negOp(tmp), AA66);
AA37 = fmnaOp (tmp, AA67, AA37);
tmp = AA46;
AA40 = fmnaOp (tmp, AA60, AA40);
AA41 = fmnaOp (tmp, AA61, AA41);
AA42 = fmnaOp (tmp, AA62, AA42);
AA43 = fmnaOp (tmp, AA63, AA43);
AA44 = fmnaOp (tmp, AA64, AA44);
AA45 = fmnaOp (tmp, AA65, AA45);
AA46 = mulOp (negOp(tmp), AA66);
AA47 = fmnaOp (tmp, AA67, AA47);
tmp = AA56;
AA50 = fmnaOp (tmp, AA60, AA50);
AA51 = fmnaOp (tmp, AA61, AA51);
AA52 = fmnaOp (tmp, AA62, AA52);
AA53 = fmnaOp (tmp, AA63, AA53);
AA54 = fmnaOp (tmp, AA64, AA54);
AA55 = fmnaOp (tmp, AA65, AA55);
AA56 = mulOp (negOp(tmp), AA66);
AA57 = fmnaOp (tmp, AA67, AA57);
tmp = AA76;
AA70 = fmnaOp (tmp, AA60, AA70);
AA71 = fmnaOp (tmp, AA61, AA71);
AA72 = fmnaOp (tmp, AA62, AA72);
AA73 = fmnaOp (tmp, AA63, AA73);
AA74 = fmnaOp (tmp, AA64, AA74);
AA75 = fmnaOp (tmp, AA65, AA75);
AA76 = mulOp (negOp(tmp), AA66);
AA77 = fmnaOp (tmp, AA67, AA77);
/****************** iteration 7 ****************/
/* scale current row */
tmp = rcpOp (AA77);
icol7 = perm7;
AA70 = mulOp (tmp, AA70);
AA71 = mulOp (tmp, AA71);
AA72 = mulOp (tmp, AA72);
AA73 = mulOp (tmp, AA73);
AA74 = mulOp (tmp, AA74);
AA75 = mulOp (tmp, AA75);
AA76 = mulOp (tmp, AA76);
AA77 = tmp;
/* eliminate above and below current row */
tmp = AA07;
AA00 = fmnaOp (tmp, AA70, AA00);
AA01 = fmnaOp (tmp, AA71, AA01);
AA02 = fmnaOp (tmp, AA72, AA02);
AA03 = fmnaOp (tmp, AA73, AA03);
AA04 = fmnaOp (tmp, AA74, AA04);
AA05 = fmnaOp (tmp, AA75, AA05);
AA06 = fmnaOp (tmp, AA76, AA06);
AA07 = mulOp (negOp(tmp), AA77);
tmp = AA17;
AA10 = fmnaOp (tmp, AA70, AA10);
AA11 = fmnaOp (tmp, AA71, AA11);
AA12 = fmnaOp (tmp, AA72, AA12);
AA13 = fmnaOp (tmp, AA73, AA13);
AA14 = fmnaOp (tmp, AA74, AA14);
AA15 = fmnaOp (tmp, AA75, AA15);
AA16 = fmnaOp (tmp, AA76, AA16);
AA17 = mulOp (negOp(tmp), AA77);
tmp = AA27;
AA20 = fmnaOp (tmp, AA70, AA20);
AA21 = fmnaOp (tmp, AA71, AA21);
AA22 = fmnaOp (tmp, AA72, AA22);
AA23 = fmnaOp (tmp, AA73, AA23);
AA24 = fmnaOp (tmp, AA74, AA24);
AA25 = fmnaOp (tmp, AA75, AA25);
AA26 = fmnaOp (tmp, AA76, AA26);
AA27 = mulOp (negOp(tmp), AA77);
tmp = AA37;
AA30 = fmnaOp (tmp, AA70, AA30);
AA31 = fmnaOp (tmp, AA71, AA31);
AA32 = fmnaOp (tmp, AA72, AA32);
AA33 = fmnaOp (tmp, AA73, AA33);
AA34 = fmnaOp (tmp, AA74, AA34);
AA35 = fmnaOp (tmp, AA75, AA35);
AA36 = fmnaOp (tmp, AA76, AA36);
AA37 = mulOp (negOp(tmp), AA77);
tmp = AA47;
AA40 = fmnaOp (tmp, AA70, AA40);
AA41 = fmnaOp (tmp, AA71, AA41);
AA42 = fmnaOp (tmp, AA72, AA42);
AA43 = fmnaOp (tmp, AA73, AA43);
AA44 = fmnaOp (tmp, AA74, AA44);
AA45 = fmnaOp (tmp, AA75, AA45);
AA46 = fmnaOp (tmp, AA76, AA46);
AA47 = mulOp (negOp(tmp), AA77);
tmp = AA57;
AA50 = fmnaOp (tmp, AA70, AA50);
AA51 = fmnaOp (tmp, AA71, AA51);
AA52 = fmnaOp (tmp, AA72, AA52);
AA53 = fmnaOp (tmp, AA73, AA53);
AA54 = fmnaOp (tmp, AA74, AA54);
AA55 = fmnaOp (tmp, AA75, AA55);
AA56 = fmnaOp (tmp, AA76, AA56);
AA57 = mulOp (negOp(tmp), AA77);
tmp = AA67;
AA60 = fmnaOp (tmp, AA70, AA60);
AA61 = fmnaOp (tmp, AA71, AA61);
AA62 = fmnaOp (tmp, AA72, AA62);
AA63 = fmnaOp (tmp, AA73, AA63);
AA64 = fmnaOp (tmp, AA74, AA64);
AA65 = fmnaOp (tmp, AA75, AA65);
AA66 = fmnaOp (tmp, AA76, AA66);
AA67 = mulOp (negOp(tmp), AA77);
/* sort columns into the correct order */
Ainv(0,icol0) = AA00;
Ainv(1,icol0) = AA10;
Ainv(2,icol0) = AA20;
Ainv(3,icol0) = AA30;
Ainv(4,icol0) = AA40;
Ainv(5,icol0) = AA50;
Ainv(6,icol0) = AA60;
Ainv(7,icol0) = AA70;
Ainv(0,icol1) = AA01;
Ainv(1,icol1) = AA11;
Ainv(2,icol1) = AA21;
Ainv(3,icol1) = AA31;
Ainv(4,icol1) = AA41;
Ainv(5,icol1) = AA51;
Ainv(6,icol1) = AA61;
Ainv(7,icol1) = AA71;
Ainv(0,icol2) = AA02;
Ainv(1,icol2) = AA12;
Ainv(2,icol2) = AA22;
Ainv(3,icol2) = AA32;
Ainv(4,icol2) = AA42;
Ainv(5,icol2) = AA52;
Ainv(6,icol2) = AA62;
Ainv(7,icol2) = AA72;
Ainv(0,icol3) = AA03;
Ainv(1,icol3) = AA13;
Ainv(2,icol3) = AA23;
Ainv(3,icol3) = AA33;
Ainv(4,icol3) = AA43;
Ainv(5,icol3) = AA53;
Ainv(6,icol3) = AA63;
Ainv(7,icol3) = AA73;
Ainv(0,icol4) = AA04;
Ainv(1,icol4) = AA14;
Ainv(2,icol4) = AA24;
Ainv(3,icol4) = AA34;
Ainv(4,icol4) = AA44;
Ainv(5,icol4) = AA54;
Ainv(6,icol4) = AA64;
Ainv(7,icol4) = AA74;
Ainv(0,icol5) = AA05;
Ainv(1,icol5) = AA15;
Ainv(2,icol5) = AA25;
Ainv(3,icol5) = AA35;
Ainv(4,icol5) = AA45;
Ainv(5,icol5) = AA55;
Ainv(6,icol5) = AA65;
Ainv(7,icol5) = AA75;
Ainv(0,icol6) = AA06;
Ainv(1,icol6) = AA16;
Ainv(2,icol6) = AA26;
Ainv(3,icol6) = AA36;
Ainv(4,icol6) = AA46;
Ainv(5,icol6) = AA56;
Ainv(6,icol6) = AA66;
Ainv(7,icol6) = AA76;
Ainv(0,icol7) = AA07;
Ainv(1,icol7) = AA17;
Ainv(2,icol7) = AA27;
Ainv(3,icol7) = AA37;
Ainv(4,icol7) = AA47;
Ainv(5,icol7) = AA57;
Ainv(6,icol7) = AA67;
Ainv(7,icol7) = AA77;
}
}
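/*
 * Host-side launch sketch (illustrative only, not part of the generated
 * kernels): one thread inverts one 8x8 matrix, so a grid of
 * ceil(batch / threadsPerBlock) blocks covers the whole batch. The block size
 * of 128, the helper name, and the pass-through <T, arch> instantiation are
 * arbitrary choices for this example; A_d and Ainv_d must each hold
 * batch * 8 * 8 elements in device memory, each matrix stored column-major as
 * the kernel expects. For very large batches a 2-D grid can be used instead,
 * which is why the kernels derive blkNum from blockIdx.y * gridDim.x +
 * blockIdx.x.
 */
template<typename T, int arch>
static void launch_matinv_8x8_sketch (const T *A_d, T *Ainv_d, int batch)
{
    const int threadsPerBlock = 128;  /* assumed block size; any reasonable multiple of the warp size works */
    dim3 block (threadsPerBlock);
    dim3 grid ((batch + threadsPerBlock - 1) / threadsPerBlock);
    matinv_8x8_matrix_per_thread<T,arch><<<grid, block>>>(A_d, Ainv_d, batch);
}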
template<typename T, int arch>
__global__ void matinv_9x9_matrix_per_thread (const T *A, T *Ainv, int batch)
{
const int blkNum = blockIdx.y * gridDim.x + blockIdx.x;
const int thrdNum = blkNum * blockDim.x + threadIdx.x;
const int N = 9;
int perm0, perm1, perm2, perm3, perm4, perm5, perm6, perm7, perm8;
int icol0, icol1, icol2, icol3, icol4, icol5, icol6, icol7, icol8;
T AA00, AA01, AA02, AA03, AA04, AA05, AA06, AA07, AA08;
T AA10, AA11, AA12, AA13, AA14, AA15, AA16, AA17, AA18;
T AA20, AA21, AA22, AA23, AA24, AA25, AA26, AA27, AA28;
T AA30, AA31, AA32, AA33, AA34, AA35, AA36, AA37, AA38;
T AA40, AA41, AA42, AA43, AA44, AA45, AA46, AA47, AA48;
T AA50, AA51, AA52, AA53, AA54, AA55, AA56, AA57, AA58;
T AA60, AA61, AA62, AA63, AA64, AA65, AA66, AA67, AA68;
T AA70, AA71, AA72, AA73, AA74, AA75, AA76, AA77, AA78;
T AA80, AA81, AA82, AA83, AA84, AA85, AA86, AA87, AA88;
T tmp;
#if USE_PIVOTING
typename config<T,arch>::absValType t;
typename config<T,arch>::absValType p;
int i, pvt;
#endif
A += thrdNum * N * N;
Ainv += thrdNum * N * N;
if (thrdNum < batch) {
AA00 = A[0];
AA10 = A[1];
AA20 = A[2];
AA30 = A[3];
AA40 = A[4];
AA50 = A[5];
AA60 = A[6];
AA70 = A[7];
AA80 = A[8];
AA01 = A[9];
AA11 = A[10];
AA21 = A[11];
AA31 = A[12];
AA41 = A[13];
AA51 = A[14];
AA61 = A[15];
AA71 = A[16];
AA81 = A[17];
AA02 = A[18];
AA12 = A[19];
AA22 = A[20];
AA32 = A[21];
AA42 = A[22];
AA52 = A[23];
AA62 = A[24];
AA72 = A[25];
AA82 = A[26];
AA03 = A[27];
AA13 = A[28];
AA23 = A[29];
AA33 = A[30];
AA43 = A[31];
AA53 = A[32];
AA63 = A[33];
AA73 = A[34];
AA83 = A[35];
AA04 = A[36];
AA14 = A[37];
AA24 = A[38];
AA34 = A[39];
AA44 = A[40];
AA54 = A[41];
AA64 = A[42];
AA74 = A[43];
AA84 = A[44];
AA05 = A[45];
AA15 = A[46];
AA25 = A[47];
AA35 = A[48];
AA45 = A[49];
AA55 = A[50];
AA65 = A[51];
AA75 = A[52];
AA85 = A[53];
AA06 = A[54];
AA16 = A[55];
AA26 = A[56];
AA36 = A[57];
AA46 = A[58];
AA56 = A[59];
AA66 = A[60];
AA76 = A[61];
AA86 = A[62];
AA07 = A[63];
AA17 = A[64];
AA27 = A[65];
AA37 = A[66];
AA47 = A[67];
AA57 = A[68];
AA67 = A[69];
AA77 = A[70];
AA87 = A[71];
AA08 = A[72];
AA18 = A[73];
AA28 = A[74];
AA38 = A[75];
AA48 = A[76];
AA58 = A[77];
AA68 = A[78];
AA78 = A[79];
AA88 = A[80];
perm0 = 0;
perm1 = 1;
perm2 = 2;
perm3 = 3;
perm4 = 4;
perm5 = 5;
perm6 = 6;
perm7 = 7;
perm8 = 8;
/****************** iteration 0 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA00);
pvt = 0;
t = absOp (AA10);
if (t > p) { p = t; pvt = 1; }
t = absOp (AA20);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA30);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA40);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA50);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA60);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA70);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA80);
if (t > p) { p = t; pvt = 8; }
/* swap pivot row with row 0 */
if (pvt == 1) {
tmp = AA00; AA00 = AA10; AA10 = tmp;
tmp = AA01; AA01 = AA11; AA11 = tmp;
tmp = AA02; AA02 = AA12; AA12 = tmp;
tmp = AA03; AA03 = AA13; AA13 = tmp;
tmp = AA04; AA04 = AA14; AA14 = tmp;
tmp = AA05; AA05 = AA15; AA15 = tmp;
tmp = AA06; AA06 = AA16; AA16 = tmp;
tmp = AA07; AA07 = AA17; AA17 = tmp;
tmp = AA08; AA08 = AA18; AA18 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm1; perm1 = i;
}
if (pvt == 2) {
tmp = AA00; AA00 = AA20; AA20 = tmp;
tmp = AA01; AA01 = AA21; AA21 = tmp;
tmp = AA02; AA02 = AA22; AA22 = tmp;
tmp = AA03; AA03 = AA23; AA23 = tmp;
tmp = AA04; AA04 = AA24; AA24 = tmp;
tmp = AA05; AA05 = AA25; AA25 = tmp;
tmp = AA06; AA06 = AA26; AA26 = tmp;
tmp = AA07; AA07 = AA27; AA27 = tmp;
tmp = AA08; AA08 = AA28; AA28 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA00; AA00 = AA30; AA30 = tmp;
tmp = AA01; AA01 = AA31; AA31 = tmp;
tmp = AA02; AA02 = AA32; AA32 = tmp;
tmp = AA03; AA03 = AA33; AA33 = tmp;
tmp = AA04; AA04 = AA34; AA34 = tmp;
tmp = AA05; AA05 = AA35; AA35 = tmp;
tmp = AA06; AA06 = AA36; AA36 = tmp;
tmp = AA07; AA07 = AA37; AA37 = tmp;
tmp = AA08; AA08 = AA38; AA38 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA00; AA00 = AA40; AA40 = tmp;
tmp = AA01; AA01 = AA41; AA41 = tmp;
tmp = AA02; AA02 = AA42; AA42 = tmp;
tmp = AA03; AA03 = AA43; AA43 = tmp;
tmp = AA04; AA04 = AA44; AA44 = tmp;
tmp = AA05; AA05 = AA45; AA45 = tmp;
tmp = AA06; AA06 = AA46; AA46 = tmp;
tmp = AA07; AA07 = AA47; AA47 = tmp;
tmp = AA08; AA08 = AA48; AA48 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA00; AA00 = AA50; AA50 = tmp;
tmp = AA01; AA01 = AA51; AA51 = tmp;
tmp = AA02; AA02 = AA52; AA52 = tmp;
tmp = AA03; AA03 = AA53; AA53 = tmp;
tmp = AA04; AA04 = AA54; AA54 = tmp;
tmp = AA05; AA05 = AA55; AA55 = tmp;
tmp = AA06; AA06 = AA56; AA56 = tmp;
tmp = AA07; AA07 = AA57; AA57 = tmp;
tmp = AA08; AA08 = AA58; AA58 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA00; AA00 = AA60; AA60 = tmp;
tmp = AA01; AA01 = AA61; AA61 = tmp;
tmp = AA02; AA02 = AA62; AA62 = tmp;
tmp = AA03; AA03 = AA63; AA63 = tmp;
tmp = AA04; AA04 = AA64; AA64 = tmp;
tmp = AA05; AA05 = AA65; AA65 = tmp;
tmp = AA06; AA06 = AA66; AA66 = tmp;
tmp = AA07; AA07 = AA67; AA67 = tmp;
tmp = AA08; AA08 = AA68; AA68 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA00; AA00 = AA70; AA70 = tmp;
tmp = AA01; AA01 = AA71; AA71 = tmp;
tmp = AA02; AA02 = AA72; AA72 = tmp;
tmp = AA03; AA03 = AA73; AA73 = tmp;
tmp = AA04; AA04 = AA74; AA74 = tmp;
tmp = AA05; AA05 = AA75; AA75 = tmp;
tmp = AA06; AA06 = AA76; AA76 = tmp;
tmp = AA07; AA07 = AA77; AA77 = tmp;
tmp = AA08; AA08 = AA78; AA78 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA00; AA00 = AA80; AA80 = tmp;
tmp = AA01; AA01 = AA81; AA81 = tmp;
tmp = AA02; AA02 = AA82; AA82 = tmp;
tmp = AA03; AA03 = AA83; AA83 = tmp;
tmp = AA04; AA04 = AA84; AA84 = tmp;
tmp = AA05; AA05 = AA85; AA85 = tmp;
tmp = AA06; AA06 = AA86; AA86 = tmp;
tmp = AA07; AA07 = AA87; AA87 = tmp;
tmp = AA08; AA08 = AA88; AA88 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm8; perm8 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA00);
icol0 = perm0;
AA00 = tmp;
AA01 = mulOp (tmp, AA01);
AA02 = mulOp (tmp, AA02);
AA03 = mulOp (tmp, AA03);
AA04 = mulOp (tmp, AA04);
AA05 = mulOp (tmp, AA05);
AA06 = mulOp (tmp, AA06);
AA07 = mulOp (tmp, AA07);
AA08 = mulOp (tmp, AA08);
/* eliminate above and below current row */
tmp = AA10;
AA10 = mulOp (negOp(tmp), AA00);
AA11 = fmnaOp (tmp, AA01, AA11);
AA12 = fmnaOp (tmp, AA02, AA12);
AA13 = fmnaOp (tmp, AA03, AA13);
AA14 = fmnaOp (tmp, AA04, AA14);
AA15 = fmnaOp (tmp, AA05, AA15);
AA16 = fmnaOp (tmp, AA06, AA16);
AA17 = fmnaOp (tmp, AA07, AA17);
AA18 = fmnaOp (tmp, AA08, AA18);
tmp = AA20;
AA20 = mulOp (negOp(tmp), AA00);
AA21 = fmnaOp (tmp, AA01, AA21);
AA22 = fmnaOp (tmp, AA02, AA22);
AA23 = fmnaOp (tmp, AA03, AA23);
AA24 = fmnaOp (tmp, AA04, AA24);
AA25 = fmnaOp (tmp, AA05, AA25);
AA26 = fmnaOp (tmp, AA06, AA26);
AA27 = fmnaOp (tmp, AA07, AA27);
AA28 = fmnaOp (tmp, AA08, AA28);
tmp = AA30;
AA30 = mulOp (negOp(tmp), AA00);
AA31 = fmnaOp (tmp, AA01, AA31);
AA32 = fmnaOp (tmp, AA02, AA32);
AA33 = fmnaOp (tmp, AA03, AA33);
AA34 = fmnaOp (tmp, AA04, AA34);
AA35 = fmnaOp (tmp, AA05, AA35);
AA36 = fmnaOp (tmp, AA06, AA36);
AA37 = fmnaOp (tmp, AA07, AA37);
AA38 = fmnaOp (tmp, AA08, AA38);
tmp = AA40;
AA40 = mulOp (negOp(tmp), AA00);
AA41 = fmnaOp (tmp, AA01, AA41);
AA42 = fmnaOp (tmp, AA02, AA42);
AA43 = fmnaOp (tmp, AA03, AA43);
AA44 = fmnaOp (tmp, AA04, AA44);
AA45 = fmnaOp (tmp, AA05, AA45);
AA46 = fmnaOp (tmp, AA06, AA46);
AA47 = fmnaOp (tmp, AA07, AA47);
AA48 = fmnaOp (tmp, AA08, AA48);
tmp = AA50;
AA50 = mulOp (negOp(tmp), AA00);
AA51 = fmnaOp (tmp, AA01, AA51);
AA52 = fmnaOp (tmp, AA02, AA52);
AA53 = fmnaOp (tmp, AA03, AA53);
AA54 = fmnaOp (tmp, AA04, AA54);
AA55 = fmnaOp (tmp, AA05, AA55);
AA56 = fmnaOp (tmp, AA06, AA56);
AA57 = fmnaOp (tmp, AA07, AA57);
AA58 = fmnaOp (tmp, AA08, AA58);
tmp = AA60;
AA60 = mulOp (negOp(tmp), AA00);
AA61 = fmnaOp (tmp, AA01, AA61);
AA62 = fmnaOp (tmp, AA02, AA62);
AA63 = fmnaOp (tmp, AA03, AA63);
AA64 = fmnaOp (tmp, AA04, AA64);
AA65 = fmnaOp (tmp, AA05, AA65);
AA66 = fmnaOp (tmp, AA06, AA66);
AA67 = fmnaOp (tmp, AA07, AA67);
AA68 = fmnaOp (tmp, AA08, AA68);
tmp = AA70;
AA70 = mulOp (negOp(tmp), AA00);
AA71 = fmnaOp (tmp, AA01, AA71);
AA72 = fmnaOp (tmp, AA02, AA72);
AA73 = fmnaOp (tmp, AA03, AA73);
AA74 = fmnaOp (tmp, AA04, AA74);
AA75 = fmnaOp (tmp, AA05, AA75);
AA76 = fmnaOp (tmp, AA06, AA76);
AA77 = fmnaOp (tmp, AA07, AA77);
AA78 = fmnaOp (tmp, AA08, AA78);
tmp = AA80;
AA80 = mulOp (negOp(tmp), AA00);
AA81 = fmnaOp (tmp, AA01, AA81);
AA82 = fmnaOp (tmp, AA02, AA82);
AA83 = fmnaOp (tmp, AA03, AA83);
AA84 = fmnaOp (tmp, AA04, AA84);
AA85 = fmnaOp (tmp, AA05, AA85);
AA86 = fmnaOp (tmp, AA06, AA86);
AA87 = fmnaOp (tmp, AA07, AA87);
AA88 = fmnaOp (tmp, AA08, AA88);
        /****************** iteration 1 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA11);
pvt = 1;
t = absOp (AA21);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA31);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA41);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA51);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA61);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA71);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA81);
if (t > p) { p = t; pvt = 8; }
/* swap pivot row with row 1 */
if (pvt == 2) {
tmp = AA10; AA10 = AA20; AA20 = tmp;
tmp = AA11; AA11 = AA21; AA21 = tmp;
tmp = AA12; AA12 = AA22; AA22 = tmp;
tmp = AA13; AA13 = AA23; AA23 = tmp;
tmp = AA14; AA14 = AA24; AA24 = tmp;
tmp = AA15; AA15 = AA25; AA25 = tmp;
tmp = AA16; AA16 = AA26; AA26 = tmp;
tmp = AA17; AA17 = AA27; AA27 = tmp;
tmp = AA18; AA18 = AA28; AA28 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA10; AA10 = AA30; AA30 = tmp;
tmp = AA11; AA11 = AA31; AA31 = tmp;
tmp = AA12; AA12 = AA32; AA32 = tmp;
tmp = AA13; AA13 = AA33; AA33 = tmp;
tmp = AA14; AA14 = AA34; AA34 = tmp;
tmp = AA15; AA15 = AA35; AA35 = tmp;
tmp = AA16; AA16 = AA36; AA36 = tmp;
tmp = AA17; AA17 = AA37; AA37 = tmp;
tmp = AA18; AA18 = AA38; AA38 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA10; AA10 = AA40; AA40 = tmp;
tmp = AA11; AA11 = AA41; AA41 = tmp;
tmp = AA12; AA12 = AA42; AA42 = tmp;
tmp = AA13; AA13 = AA43; AA43 = tmp;
tmp = AA14; AA14 = AA44; AA44 = tmp;
tmp = AA15; AA15 = AA45; AA45 = tmp;
tmp = AA16; AA16 = AA46; AA46 = tmp;
tmp = AA17; AA17 = AA47; AA47 = tmp;
tmp = AA18; AA18 = AA48; AA48 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA10; AA10 = AA50; AA50 = tmp;
tmp = AA11; AA11 = AA51; AA51 = tmp;
tmp = AA12; AA12 = AA52; AA52 = tmp;
tmp = AA13; AA13 = AA53; AA53 = tmp;
tmp = AA14; AA14 = AA54; AA54 = tmp;
tmp = AA15; AA15 = AA55; AA55 = tmp;
tmp = AA16; AA16 = AA56; AA56 = tmp;
tmp = AA17; AA17 = AA57; AA57 = tmp;
tmp = AA18; AA18 = AA58; AA58 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA10; AA10 = AA60; AA60 = tmp;
tmp = AA11; AA11 = AA61; AA61 = tmp;
tmp = AA12; AA12 = AA62; AA62 = tmp;
tmp = AA13; AA13 = AA63; AA63 = tmp;
tmp = AA14; AA14 = AA64; AA64 = tmp;
tmp = AA15; AA15 = AA65; AA65 = tmp;
tmp = AA16; AA16 = AA66; AA66 = tmp;
tmp = AA17; AA17 = AA67; AA67 = tmp;
tmp = AA18; AA18 = AA68; AA68 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA10; AA10 = AA70; AA70 = tmp;
tmp = AA11; AA11 = AA71; AA71 = tmp;
tmp = AA12; AA12 = AA72; AA72 = tmp;
tmp = AA13; AA13 = AA73; AA73 = tmp;
tmp = AA14; AA14 = AA74; AA74 = tmp;
tmp = AA15; AA15 = AA75; AA75 = tmp;
tmp = AA16; AA16 = AA76; AA76 = tmp;
tmp = AA17; AA17 = AA77; AA77 = tmp;
tmp = AA18; AA18 = AA78; AA78 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA10; AA10 = AA80; AA80 = tmp;
tmp = AA11; AA11 = AA81; AA81 = tmp;
tmp = AA12; AA12 = AA82; AA82 = tmp;
tmp = AA13; AA13 = AA83; AA83 = tmp;
tmp = AA14; AA14 = AA84; AA84 = tmp;
tmp = AA15; AA15 = AA85; AA85 = tmp;
tmp = AA16; AA16 = AA86; AA86 = tmp;
tmp = AA17; AA17 = AA87; AA87 = tmp;
tmp = AA18; AA18 = AA88; AA88 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm8; perm8 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA11);
icol1 = perm1;
AA10 = mulOp (tmp, AA10);
AA11 = tmp;
AA12 = mulOp (tmp, AA12);
AA13 = mulOp (tmp, AA13);
AA14 = mulOp (tmp, AA14);
AA15 = mulOp (tmp, AA15);
AA16 = mulOp (tmp, AA16);
AA17 = mulOp (tmp, AA17);
AA18 = mulOp (tmp, AA18);
/* eliminate above and below current row */
tmp = AA01;
AA00 = fmnaOp (tmp, AA10, AA00);
AA01 = mulOp (negOp(tmp), AA11);
AA02 = fmnaOp (tmp, AA12, AA02);
AA03 = fmnaOp (tmp, AA13, AA03);
AA04 = fmnaOp (tmp, AA14, AA04);
AA05 = fmnaOp (tmp, AA15, AA05);
AA06 = fmnaOp (tmp, AA16, AA06);
AA07 = fmnaOp (tmp, AA17, AA07);
AA08 = fmnaOp (tmp, AA18, AA08);
tmp = AA21;
AA20 = fmnaOp (tmp, AA10, AA20);
AA21 = mulOp (negOp(tmp), AA11);
AA22 = fmnaOp (tmp, AA12, AA22);
AA23 = fmnaOp (tmp, AA13, AA23);
AA24 = fmnaOp (tmp, AA14, AA24);
AA25 = fmnaOp (tmp, AA15, AA25);
AA26 = fmnaOp (tmp, AA16, AA26);
AA27 = fmnaOp (tmp, AA17, AA27);
AA28 = fmnaOp (tmp, AA18, AA28);
tmp = AA31;
AA30 = fmnaOp (tmp, AA10, AA30);
AA31 = mulOp (negOp(tmp), AA11);
AA32 = fmnaOp (tmp, AA12, AA32);
AA33 = fmnaOp (tmp, AA13, AA33);
AA34 = fmnaOp (tmp, AA14, AA34);
AA35 = fmnaOp (tmp, AA15, AA35);
AA36 = fmnaOp (tmp, AA16, AA36);
AA37 = fmnaOp (tmp, AA17, AA37);
AA38 = fmnaOp (tmp, AA18, AA38);
tmp = AA41;
AA40 = fmnaOp (tmp, AA10, AA40);
AA41 = mulOp (negOp(tmp), AA11);
AA42 = fmnaOp (tmp, AA12, AA42);
AA43 = fmnaOp (tmp, AA13, AA43);
AA44 = fmnaOp (tmp, AA14, AA44);
AA45 = fmnaOp (tmp, AA15, AA45);
AA46 = fmnaOp (tmp, AA16, AA46);
AA47 = fmnaOp (tmp, AA17, AA47);
AA48 = fmnaOp (tmp, AA18, AA48);
tmp = AA51;
AA50 = fmnaOp (tmp, AA10, AA50);
AA51 = mulOp (negOp(tmp), AA11);
AA52 = fmnaOp (tmp, AA12, AA52);
AA53 = fmnaOp (tmp, AA13, AA53);
AA54 = fmnaOp (tmp, AA14, AA54);
AA55 = fmnaOp (tmp, AA15, AA55);
AA56 = fmnaOp (tmp, AA16, AA56);
AA57 = fmnaOp (tmp, AA17, AA57);
AA58 = fmnaOp (tmp, AA18, AA58);
tmp = AA61;
AA60 = fmnaOp (tmp, AA10, AA60);
AA61 = mulOp (negOp(tmp), AA11);
AA62 = fmnaOp (tmp, AA12, AA62);
AA63 = fmnaOp (tmp, AA13, AA63);
AA64 = fmnaOp (tmp, AA14, AA64);
AA65 = fmnaOp (tmp, AA15, AA65);
AA66 = fmnaOp (tmp, AA16, AA66);
AA67 = fmnaOp (tmp, AA17, AA67);
AA68 = fmnaOp (tmp, AA18, AA68);
tmp = AA71;
AA70 = fmnaOp (tmp, AA10, AA70);
AA71 = mulOp (negOp(tmp), AA11);
AA72 = fmnaOp (tmp, AA12, AA72);
AA73 = fmnaOp (tmp, AA13, AA73);
AA74 = fmnaOp (tmp, AA14, AA74);
AA75 = fmnaOp (tmp, AA15, AA75);
AA76 = fmnaOp (tmp, AA16, AA76);
AA77 = fmnaOp (tmp, AA17, AA77);
AA78 = fmnaOp (tmp, AA18, AA78);
tmp = AA81;
AA80 = fmnaOp (tmp, AA10, AA80);
AA81 = mulOp (negOp(tmp), AA11);
AA82 = fmnaOp (tmp, AA12, AA82);
AA83 = fmnaOp (tmp, AA13, AA83);
AA84 = fmnaOp (tmp, AA14, AA84);
AA85 = fmnaOp (tmp, AA15, AA85);
AA86 = fmnaOp (tmp, AA16, AA86);
AA87 = fmnaOp (tmp, AA17, AA87);
AA88 = fmnaOp (tmp, AA18, AA88);
/****************** iteration 2 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA22);
pvt = 2;
t = absOp (AA32);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA42);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA52);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA62);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA72);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA82);
if (t > p) { p = t; pvt = 8; }
/* swap pivot row with row 2 */
if (pvt == 3) {
tmp = AA20; AA20 = AA30; AA30 = tmp;
tmp = AA21; AA21 = AA31; AA31 = tmp;
tmp = AA22; AA22 = AA32; AA32 = tmp;
tmp = AA23; AA23 = AA33; AA33 = tmp;
tmp = AA24; AA24 = AA34; AA34 = tmp;
tmp = AA25; AA25 = AA35; AA35 = tmp;
tmp = AA26; AA26 = AA36; AA36 = tmp;
tmp = AA27; AA27 = AA37; AA37 = tmp;
tmp = AA28; AA28 = AA38; AA38 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA20; AA20 = AA40; AA40 = tmp;
tmp = AA21; AA21 = AA41; AA41 = tmp;
tmp = AA22; AA22 = AA42; AA42 = tmp;
tmp = AA23; AA23 = AA43; AA43 = tmp;
tmp = AA24; AA24 = AA44; AA44 = tmp;
tmp = AA25; AA25 = AA45; AA45 = tmp;
tmp = AA26; AA26 = AA46; AA46 = tmp;
tmp = AA27; AA27 = AA47; AA47 = tmp;
tmp = AA28; AA28 = AA48; AA48 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA20; AA20 = AA50; AA50 = tmp;
tmp = AA21; AA21 = AA51; AA51 = tmp;
tmp = AA22; AA22 = AA52; AA52 = tmp;
tmp = AA23; AA23 = AA53; AA53 = tmp;
tmp = AA24; AA24 = AA54; AA54 = tmp;
tmp = AA25; AA25 = AA55; AA55 = tmp;
tmp = AA26; AA26 = AA56; AA56 = tmp;
tmp = AA27; AA27 = AA57; AA57 = tmp;
tmp = AA28; AA28 = AA58; AA58 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA20; AA20 = AA60; AA60 = tmp;
tmp = AA21; AA21 = AA61; AA61 = tmp;
tmp = AA22; AA22 = AA62; AA62 = tmp;
tmp = AA23; AA23 = AA63; AA63 = tmp;
tmp = AA24; AA24 = AA64; AA64 = tmp;
tmp = AA25; AA25 = AA65; AA65 = tmp;
tmp = AA26; AA26 = AA66; AA66 = tmp;
tmp = AA27; AA27 = AA67; AA67 = tmp;
tmp = AA28; AA28 = AA68; AA68 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA20; AA20 = AA70; AA70 = tmp;
tmp = AA21; AA21 = AA71; AA71 = tmp;
tmp = AA22; AA22 = AA72; AA72 = tmp;
tmp = AA23; AA23 = AA73; AA73 = tmp;
tmp = AA24; AA24 = AA74; AA74 = tmp;
tmp = AA25; AA25 = AA75; AA75 = tmp;
tmp = AA26; AA26 = AA76; AA76 = tmp;
tmp = AA27; AA27 = AA77; AA77 = tmp;
tmp = AA28; AA28 = AA78; AA78 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA20; AA20 = AA80; AA80 = tmp;
tmp = AA21; AA21 = AA81; AA81 = tmp;
tmp = AA22; AA22 = AA82; AA82 = tmp;
tmp = AA23; AA23 = AA83; AA83 = tmp;
tmp = AA24; AA24 = AA84; AA84 = tmp;
tmp = AA25; AA25 = AA85; AA85 = tmp;
tmp = AA26; AA26 = AA86; AA86 = tmp;
tmp = AA27; AA27 = AA87; AA87 = tmp;
tmp = AA28; AA28 = AA88; AA88 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm8; perm8 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA22);
icol2 = perm2;
AA20 = mulOp (tmp, AA20);
AA21 = mulOp (tmp, AA21);
AA22 = tmp;
AA23 = mulOp (tmp, AA23);
AA24 = mulOp (tmp, AA24);
AA25 = mulOp (tmp, AA25);
AA26 = mulOp (tmp, AA26);
AA27 = mulOp (tmp, AA27);
AA28 = mulOp (tmp, AA28);
/* eliminate above and below current row */
tmp = AA02;
AA00 = fmnaOp (tmp, AA20, AA00);
AA01 = fmnaOp (tmp, AA21, AA01);
AA02 = mulOp (negOp(tmp), AA22);
AA03 = fmnaOp (tmp, AA23, AA03);
AA04 = fmnaOp (tmp, AA24, AA04);
AA05 = fmnaOp (tmp, AA25, AA05);
AA06 = fmnaOp (tmp, AA26, AA06);
AA07 = fmnaOp (tmp, AA27, AA07);
AA08 = fmnaOp (tmp, AA28, AA08);
tmp = AA12;
AA10 = fmnaOp (tmp, AA20, AA10);
AA11 = fmnaOp (tmp, AA21, AA11);
AA12 = mulOp (negOp(tmp), AA22);
AA13 = fmnaOp (tmp, AA23, AA13);
AA14 = fmnaOp (tmp, AA24, AA14);
AA15 = fmnaOp (tmp, AA25, AA15);
AA16 = fmnaOp (tmp, AA26, AA16);
AA17 = fmnaOp (tmp, AA27, AA17);
AA18 = fmnaOp (tmp, AA28, AA18);
tmp = AA32;
AA30 = fmnaOp (tmp, AA20, AA30);
AA31 = fmnaOp (tmp, AA21, AA31);
AA32 = mulOp (negOp(tmp), AA22);
AA33 = fmnaOp (tmp, AA23, AA33);
AA34 = fmnaOp (tmp, AA24, AA34);
AA35 = fmnaOp (tmp, AA25, AA35);
AA36 = fmnaOp (tmp, AA26, AA36);
AA37 = fmnaOp (tmp, AA27, AA37);
AA38 = fmnaOp (tmp, AA28, AA38);
tmp = AA42;
AA40 = fmnaOp (tmp, AA20, AA40);
AA41 = fmnaOp (tmp, AA21, AA41);
AA42 = mulOp (negOp(tmp), AA22);
AA43 = fmnaOp (tmp, AA23, AA43);
AA44 = fmnaOp (tmp, AA24, AA44);
AA45 = fmnaOp (tmp, AA25, AA45);
AA46 = fmnaOp (tmp, AA26, AA46);
AA47 = fmnaOp (tmp, AA27, AA47);
AA48 = fmnaOp (tmp, AA28, AA48);
tmp = AA52;
AA50 = fmnaOp (tmp, AA20, AA50);
AA51 = fmnaOp (tmp, AA21, AA51);
AA52 = mulOp (negOp(tmp), AA22);
AA53 = fmnaOp (tmp, AA23, AA53);
AA54 = fmnaOp (tmp, AA24, AA54);
AA55 = fmnaOp (tmp, AA25, AA55);
AA56 = fmnaOp (tmp, AA26, AA56);
AA57 = fmnaOp (tmp, AA27, AA57);
AA58 = fmnaOp (tmp, AA28, AA58);
tmp = AA62;
AA60 = fmnaOp (tmp, AA20, AA60);
AA61 = fmnaOp (tmp, AA21, AA61);
AA62 = mulOp (negOp(tmp), AA22);
AA63 = fmnaOp (tmp, AA23, AA63);
AA64 = fmnaOp (tmp, AA24, AA64);
AA65 = fmnaOp (tmp, AA25, AA65);
AA66 = fmnaOp (tmp, AA26, AA66);
AA67 = fmnaOp (tmp, AA27, AA67);
AA68 = fmnaOp (tmp, AA28, AA68);
tmp = AA72;
AA70 = fmnaOp (tmp, AA20, AA70);
AA71 = fmnaOp (tmp, AA21, AA71);
AA72 = mulOp (negOp(tmp), AA22);
AA73 = fmnaOp (tmp, AA23, AA73);
AA74 = fmnaOp (tmp, AA24, AA74);
AA75 = fmnaOp (tmp, AA25, AA75);
AA76 = fmnaOp (tmp, AA26, AA76);
AA77 = fmnaOp (tmp, AA27, AA77);
AA78 = fmnaOp (tmp, AA28, AA78);
tmp = AA82;
AA80 = fmnaOp (tmp, AA20, AA80);
AA81 = fmnaOp (tmp, AA21, AA81);
AA82 = mulOp (negOp(tmp), AA22);
AA83 = fmnaOp (tmp, AA23, AA83);
AA84 = fmnaOp (tmp, AA24, AA84);
AA85 = fmnaOp (tmp, AA25, AA85);
AA86 = fmnaOp (tmp, AA26, AA86);
AA87 = fmnaOp (tmp, AA27, AA87);
AA88 = fmnaOp (tmp, AA28, AA88);
/****************** iteration 3 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA33);
pvt = 3;
t = absOp (AA43);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA53);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA63);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA73);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA83);
if (t > p) { p = t; pvt = 8; }
/* swap pivot row with row 3 */
if (pvt == 4) {
tmp = AA30; AA30 = AA40; AA40 = tmp;
tmp = AA31; AA31 = AA41; AA41 = tmp;
tmp = AA32; AA32 = AA42; AA42 = tmp;
tmp = AA33; AA33 = AA43; AA43 = tmp;
tmp = AA34; AA34 = AA44; AA44 = tmp;
tmp = AA35; AA35 = AA45; AA45 = tmp;
tmp = AA36; AA36 = AA46; AA46 = tmp;
tmp = AA37; AA37 = AA47; AA47 = tmp;
tmp = AA38; AA38 = AA48; AA48 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA30; AA30 = AA50; AA50 = tmp;
tmp = AA31; AA31 = AA51; AA51 = tmp;
tmp = AA32; AA32 = AA52; AA52 = tmp;
tmp = AA33; AA33 = AA53; AA53 = tmp;
tmp = AA34; AA34 = AA54; AA54 = tmp;
tmp = AA35; AA35 = AA55; AA55 = tmp;
tmp = AA36; AA36 = AA56; AA56 = tmp;
tmp = AA37; AA37 = AA57; AA57 = tmp;
tmp = AA38; AA38 = AA58; AA58 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA30; AA30 = AA60; AA60 = tmp;
tmp = AA31; AA31 = AA61; AA61 = tmp;
tmp = AA32; AA32 = AA62; AA62 = tmp;
tmp = AA33; AA33 = AA63; AA63 = tmp;
tmp = AA34; AA34 = AA64; AA64 = tmp;
tmp = AA35; AA35 = AA65; AA65 = tmp;
tmp = AA36; AA36 = AA66; AA66 = tmp;
tmp = AA37; AA37 = AA67; AA67 = tmp;
tmp = AA38; AA38 = AA68; AA68 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA30; AA30 = AA70; AA70 = tmp;
tmp = AA31; AA31 = AA71; AA71 = tmp;
tmp = AA32; AA32 = AA72; AA72 = tmp;
tmp = AA33; AA33 = AA73; AA73 = tmp;
tmp = AA34; AA34 = AA74; AA74 = tmp;
tmp = AA35; AA35 = AA75; AA75 = tmp;
tmp = AA36; AA36 = AA76; AA76 = tmp;
tmp = AA37; AA37 = AA77; AA77 = tmp;
tmp = AA38; AA38 = AA78; AA78 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA30; AA30 = AA80; AA80 = tmp;
tmp = AA31; AA31 = AA81; AA81 = tmp;
tmp = AA32; AA32 = AA82; AA82 = tmp;
tmp = AA33; AA33 = AA83; AA83 = tmp;
tmp = AA34; AA34 = AA84; AA84 = tmp;
tmp = AA35; AA35 = AA85; AA85 = tmp;
tmp = AA36; AA36 = AA86; AA86 = tmp;
tmp = AA37; AA37 = AA87; AA87 = tmp;
tmp = AA38; AA38 = AA88; AA88 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm8; perm8 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA33);
icol3 = perm3;
AA30 = mulOp (tmp, AA30);
AA31 = mulOp (tmp, AA31);
AA32 = mulOp (tmp, AA32);
AA33 = tmp;
AA34 = mulOp (tmp, AA34);
AA35 = mulOp (tmp, AA35);
AA36 = mulOp (tmp, AA36);
AA37 = mulOp (tmp, AA37);
AA38 = mulOp (tmp, AA38);
/* eliminate above and below current row */
tmp = AA03;
AA00 = fmnaOp (tmp, AA30, AA00);
AA01 = fmnaOp (tmp, AA31, AA01);
AA02 = fmnaOp (tmp, AA32, AA02);
AA03 = mulOp (negOp(tmp), AA33);
AA04 = fmnaOp (tmp, AA34, AA04);
AA05 = fmnaOp (tmp, AA35, AA05);
AA06 = fmnaOp (tmp, AA36, AA06);
AA07 = fmnaOp (tmp, AA37, AA07);
AA08 = fmnaOp (tmp, AA38, AA08);
tmp = AA13;
AA10 = fmnaOp (tmp, AA30, AA10);
AA11 = fmnaOp (tmp, AA31, AA11);
AA12 = fmnaOp (tmp, AA32, AA12);
AA13 = mulOp (negOp(tmp), AA33);
AA14 = fmnaOp (tmp, AA34, AA14);
AA15 = fmnaOp (tmp, AA35, AA15);
AA16 = fmnaOp (tmp, AA36, AA16);
AA17 = fmnaOp (tmp, AA37, AA17);
AA18 = fmnaOp (tmp, AA38, AA18);
tmp = AA23;
AA20 = fmnaOp (tmp, AA30, AA20);
AA21 = fmnaOp (tmp, AA31, AA21);
AA22 = fmnaOp (tmp, AA32, AA22);
AA23 = mulOp (negOp(tmp), AA33);
AA24 = fmnaOp (tmp, AA34, AA24);
AA25 = fmnaOp (tmp, AA35, AA25);
AA26 = fmnaOp (tmp, AA36, AA26);
AA27 = fmnaOp (tmp, AA37, AA27);
AA28 = fmnaOp (tmp, AA38, AA28);
tmp = AA43;
AA40 = fmnaOp (tmp, AA30, AA40);
AA41 = fmnaOp (tmp, AA31, AA41);
AA42 = fmnaOp (tmp, AA32, AA42);
AA43 = mulOp (negOp(tmp), AA33);
AA44 = fmnaOp (tmp, AA34, AA44);
AA45 = fmnaOp (tmp, AA35, AA45);
AA46 = fmnaOp (tmp, AA36, AA46);
AA47 = fmnaOp (tmp, AA37, AA47);
AA48 = fmnaOp (tmp, AA38, AA48);
tmp = AA53;
AA50 = fmnaOp (tmp, AA30, AA50);
AA51 = fmnaOp (tmp, AA31, AA51);
AA52 = fmnaOp (tmp, AA32, AA52);
AA53 = mulOp (negOp(tmp), AA33);
AA54 = fmnaOp (tmp, AA34, AA54);
AA55 = fmnaOp (tmp, AA35, AA55);
AA56 = fmnaOp (tmp, AA36, AA56);
AA57 = fmnaOp (tmp, AA37, AA57);
AA58 = fmnaOp (tmp, AA38, AA58);
tmp = AA63;
AA60 = fmnaOp (tmp, AA30, AA60);
AA61 = fmnaOp (tmp, AA31, AA61);
AA62 = fmnaOp (tmp, AA32, AA62);
AA63 = mulOp (negOp(tmp), AA33);
AA64 = fmnaOp (tmp, AA34, AA64);
AA65 = fmnaOp (tmp, AA35, AA65);
AA66 = fmnaOp (tmp, AA36, AA66);
AA67 = fmnaOp (tmp, AA37, AA67);
AA68 = fmnaOp (tmp, AA38, AA68);
tmp = AA73;
AA70 = fmnaOp (tmp, AA30, AA70);
AA71 = fmnaOp (tmp, AA31, AA71);
AA72 = fmnaOp (tmp, AA32, AA72);
AA73 = mulOp (negOp(tmp), AA33);
AA74 = fmnaOp (tmp, AA34, AA74);
AA75 = fmnaOp (tmp, AA35, AA75);
AA76 = fmnaOp (tmp, AA36, AA76);
AA77 = fmnaOp (tmp, AA37, AA77);
AA78 = fmnaOp (tmp, AA38, AA78);
tmp = AA83;
AA80 = fmnaOp (tmp, AA30, AA80);
AA81 = fmnaOp (tmp, AA31, AA81);
AA82 = fmnaOp (tmp, AA32, AA82);
AA83 = mulOp (negOp(tmp), AA33);
AA84 = fmnaOp (tmp, AA34, AA84);
AA85 = fmnaOp (tmp, AA35, AA85);
AA86 = fmnaOp (tmp, AA36, AA86);
AA87 = fmnaOp (tmp, AA37, AA87);
AA88 = fmnaOp (tmp, AA38, AA88);
/****************** iteration 4 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA44);
pvt = 4;
t = absOp (AA54);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA64);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA74);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA84);
if (t > p) { p = t; pvt = 8; }
/* swap pivot row with row 4 */
if (pvt == 5) {
tmp = AA40; AA40 = AA50; AA50 = tmp;
tmp = AA41; AA41 = AA51; AA51 = tmp;
tmp = AA42; AA42 = AA52; AA52 = tmp;
tmp = AA43; AA43 = AA53; AA53 = tmp;
tmp = AA44; AA44 = AA54; AA54 = tmp;
tmp = AA45; AA45 = AA55; AA55 = tmp;
tmp = AA46; AA46 = AA56; AA56 = tmp;
tmp = AA47; AA47 = AA57; AA57 = tmp;
tmp = AA48; AA48 = AA58; AA58 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA40; AA40 = AA60; AA60 = tmp;
tmp = AA41; AA41 = AA61; AA61 = tmp;
tmp = AA42; AA42 = AA62; AA62 = tmp;
tmp = AA43; AA43 = AA63; AA63 = tmp;
tmp = AA44; AA44 = AA64; AA64 = tmp;
tmp = AA45; AA45 = AA65; AA65 = tmp;
tmp = AA46; AA46 = AA66; AA66 = tmp;
tmp = AA47; AA47 = AA67; AA67 = tmp;
tmp = AA48; AA48 = AA68; AA68 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA40; AA40 = AA70; AA70 = tmp;
tmp = AA41; AA41 = AA71; AA71 = tmp;
tmp = AA42; AA42 = AA72; AA72 = tmp;
tmp = AA43; AA43 = AA73; AA73 = tmp;
tmp = AA44; AA44 = AA74; AA74 = tmp;
tmp = AA45; AA45 = AA75; AA75 = tmp;
tmp = AA46; AA46 = AA76; AA76 = tmp;
tmp = AA47; AA47 = AA77; AA77 = tmp;
tmp = AA48; AA48 = AA78; AA78 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA40; AA40 = AA80; AA80 = tmp;
tmp = AA41; AA41 = AA81; AA81 = tmp;
tmp = AA42; AA42 = AA82; AA82 = tmp;
tmp = AA43; AA43 = AA83; AA83 = tmp;
tmp = AA44; AA44 = AA84; AA84 = tmp;
tmp = AA45; AA45 = AA85; AA85 = tmp;
tmp = AA46; AA46 = AA86; AA86 = tmp;
tmp = AA47; AA47 = AA87; AA87 = tmp;
tmp = AA48; AA48 = AA88; AA88 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm8; perm8 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA44);
icol4 = perm4;
AA40 = mulOp (tmp, AA40);
AA41 = mulOp (tmp, AA41);
AA42 = mulOp (tmp, AA42);
AA43 = mulOp (tmp, AA43);
AA44 = tmp;
AA45 = mulOp (tmp, AA45);
AA46 = mulOp (tmp, AA46);
AA47 = mulOp (tmp, AA47);
AA48 = mulOp (tmp, AA48);
/* eliminate above and below current row */
tmp = AA04;
AA00 = fmnaOp (tmp, AA40, AA00);
AA01 = fmnaOp (tmp, AA41, AA01);
AA02 = fmnaOp (tmp, AA42, AA02);
AA03 = fmnaOp (tmp, AA43, AA03);
AA04 = mulOp (negOp(tmp), AA44);
AA05 = fmnaOp (tmp, AA45, AA05);
AA06 = fmnaOp (tmp, AA46, AA06);
AA07 = fmnaOp (tmp, AA47, AA07);
AA08 = fmnaOp (tmp, AA48, AA08);
tmp = AA14;
AA10 = fmnaOp (tmp, AA40, AA10);
AA11 = fmnaOp (tmp, AA41, AA11);
AA12 = fmnaOp (tmp, AA42, AA12);
AA13 = fmnaOp (tmp, AA43, AA13);
AA14 = mulOp (negOp(tmp), AA44);
AA15 = fmnaOp (tmp, AA45, AA15);
AA16 = fmnaOp (tmp, AA46, AA16);
AA17 = fmnaOp (tmp, AA47, AA17);
AA18 = fmnaOp (tmp, AA48, AA18);
tmp = AA24;
AA20 = fmnaOp (tmp, AA40, AA20);
AA21 = fmnaOp (tmp, AA41, AA21);
AA22 = fmnaOp (tmp, AA42, AA22);
AA23 = fmnaOp (tmp, AA43, AA23);
AA24 = mulOp (negOp(tmp), AA44);
AA25 = fmnaOp (tmp, AA45, AA25);
AA26 = fmnaOp (tmp, AA46, AA26);
AA27 = fmnaOp (tmp, AA47, AA27);
AA28 = fmnaOp (tmp, AA48, AA28);
tmp = AA34;
AA30 = fmnaOp (tmp, AA40, AA30);
AA31 = fmnaOp (tmp, AA41, AA31);
AA32 = fmnaOp (tmp, AA42, AA32);
AA33 = fmnaOp (tmp, AA43, AA33);
AA34 = mulOp (negOp(tmp), AA44);
AA35 = fmnaOp (tmp, AA45, AA35);
AA36 = fmnaOp (tmp, AA46, AA36);
AA37 = fmnaOp (tmp, AA47, AA37);
AA38 = fmnaOp (tmp, AA48, AA38);
tmp = AA54;
AA50 = fmnaOp (tmp, AA40, AA50);
AA51 = fmnaOp (tmp, AA41, AA51);
AA52 = fmnaOp (tmp, AA42, AA52);
AA53 = fmnaOp (tmp, AA43, AA53);
AA54 = mulOp (negOp(tmp), AA44);
AA55 = fmnaOp (tmp, AA45, AA55);
AA56 = fmnaOp (tmp, AA46, AA56);
AA57 = fmnaOp (tmp, AA47, AA57);
AA58 = fmnaOp (tmp, AA48, AA58);
tmp = AA64;
AA60 = fmnaOp (tmp, AA40, AA60);
AA61 = fmnaOp (tmp, AA41, AA61);
AA62 = fmnaOp (tmp, AA42, AA62);
AA63 = fmnaOp (tmp, AA43, AA63);
AA64 = mulOp (negOp(tmp), AA44);
AA65 = fmnaOp (tmp, AA45, AA65);
AA66 = fmnaOp (tmp, AA46, AA66);
AA67 = fmnaOp (tmp, AA47, AA67);
AA68 = fmnaOp (tmp, AA48, AA68);
tmp = AA74;
AA70 = fmnaOp (tmp, AA40, AA70);
AA71 = fmnaOp (tmp, AA41, AA71);
AA72 = fmnaOp (tmp, AA42, AA72);
AA73 = fmnaOp (tmp, AA43, AA73);
AA74 = mulOp (negOp(tmp), AA44);
AA75 = fmnaOp (tmp, AA45, AA75);
AA76 = fmnaOp (tmp, AA46, AA76);
AA77 = fmnaOp (tmp, AA47, AA77);
AA78 = fmnaOp (tmp, AA48, AA78);
tmp = AA84;
AA80 = fmnaOp (tmp, AA40, AA80);
AA81 = fmnaOp (tmp, AA41, AA81);
AA82 = fmnaOp (tmp, AA42, AA82);
AA83 = fmnaOp (tmp, AA43, AA83);
AA84 = mulOp (negOp(tmp), AA44);
AA85 = fmnaOp (tmp, AA45, AA85);
AA86 = fmnaOp (tmp, AA46, AA86);
AA87 = fmnaOp (tmp, AA47, AA87);
AA88 = fmnaOp (tmp, AA48, AA88);
/****************** iteration 5 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA55);
pvt = 5;
t = absOp (AA65);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA75);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA85);
if (t > p) { p = t; pvt = 8; }
/* swap pivot row with row 5 */
if (pvt == 6) {
tmp = AA50; AA50 = AA60; AA60 = tmp;
tmp = AA51; AA51 = AA61; AA61 = tmp;
tmp = AA52; AA52 = AA62; AA62 = tmp;
tmp = AA53; AA53 = AA63; AA63 = tmp;
tmp = AA54; AA54 = AA64; AA64 = tmp;
tmp = AA55; AA55 = AA65; AA65 = tmp;
tmp = AA56; AA56 = AA66; AA66 = tmp;
tmp = AA57; AA57 = AA67; AA67 = tmp;
tmp = AA58; AA58 = AA68; AA68 = tmp;
/* update permutation vector based on row swap */
i = perm5; perm5 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA50; AA50 = AA70; AA70 = tmp;
tmp = AA51; AA51 = AA71; AA71 = tmp;
tmp = AA52; AA52 = AA72; AA72 = tmp;
tmp = AA53; AA53 = AA73; AA73 = tmp;
tmp = AA54; AA54 = AA74; AA74 = tmp;
tmp = AA55; AA55 = AA75; AA75 = tmp;
tmp = AA56; AA56 = AA76; AA76 = tmp;
tmp = AA57; AA57 = AA77; AA77 = tmp;
tmp = AA58; AA58 = AA78; AA78 = tmp;
/* update permutation vector based on row swap */
i = perm5; perm5 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA50; AA50 = AA80; AA80 = tmp;
tmp = AA51; AA51 = AA81; AA81 = tmp;
tmp = AA52; AA52 = AA82; AA82 = tmp;
tmp = AA53; AA53 = AA83; AA83 = tmp;
tmp = AA54; AA54 = AA84; AA84 = tmp;
tmp = AA55; AA55 = AA85; AA85 = tmp;
tmp = AA56; AA56 = AA86; AA86 = tmp;
tmp = AA57; AA57 = AA87; AA87 = tmp;
tmp = AA58; AA58 = AA88; AA88 = tmp;
/* update permutation vector based on row swap */
i = perm5; perm5 = perm8; perm8 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA55);
icol5 = perm5;
AA50 = mulOp (tmp, AA50);
AA51 = mulOp (tmp, AA51);
AA52 = mulOp (tmp, AA52);
AA53 = mulOp (tmp, AA53);
AA54 = mulOp (tmp, AA54);
AA55 = tmp;
AA56 = mulOp (tmp, AA56);
AA57 = mulOp (tmp, AA57);
AA58 = mulOp (tmp, AA58);
/* eliminate above and below current row */
tmp = AA05;
AA00 = fmnaOp (tmp, AA50, AA00);
AA01 = fmnaOp (tmp, AA51, AA01);
AA02 = fmnaOp (tmp, AA52, AA02);
AA03 = fmnaOp (tmp, AA53, AA03);
AA04 = fmnaOp (tmp, AA54, AA04);
AA05 = mulOp (negOp(tmp), AA55);
AA06 = fmnaOp (tmp, AA56, AA06);
AA07 = fmnaOp (tmp, AA57, AA07);
AA08 = fmnaOp (tmp, AA58, AA08);
tmp = AA15;
AA10 = fmnaOp (tmp, AA50, AA10);
AA11 = fmnaOp (tmp, AA51, AA11);
AA12 = fmnaOp (tmp, AA52, AA12);
AA13 = fmnaOp (tmp, AA53, AA13);
AA14 = fmnaOp (tmp, AA54, AA14);
AA15 = mulOp (negOp(tmp), AA55);
AA16 = fmnaOp (tmp, AA56, AA16);
AA17 = fmnaOp (tmp, AA57, AA17);
AA18 = fmnaOp (tmp, AA58, AA18);
tmp = AA25;
AA20 = fmnaOp (tmp, AA50, AA20);
AA21 = fmnaOp (tmp, AA51, AA21);
AA22 = fmnaOp (tmp, AA52, AA22);
AA23 = fmnaOp (tmp, AA53, AA23);
AA24 = fmnaOp (tmp, AA54, AA24);
AA25 = mulOp (negOp(tmp), AA55);
AA26 = fmnaOp (tmp, AA56, AA26);
AA27 = fmnaOp (tmp, AA57, AA27);
AA28 = fmnaOp (tmp, AA58, AA28);
tmp = AA35;
AA30 = fmnaOp (tmp, AA50, AA30);
AA31 = fmnaOp (tmp, AA51, AA31);
AA32 = fmnaOp (tmp, AA52, AA32);
AA33 = fmnaOp (tmp, AA53, AA33);
AA34 = fmnaOp (tmp, AA54, AA34);
AA35 = mulOp (negOp(tmp), AA55);
AA36 = fmnaOp (tmp, AA56, AA36);
AA37 = fmnaOp (tmp, AA57, AA37);
AA38 = fmnaOp (tmp, AA58, AA38);
tmp = AA45;
AA40 = fmnaOp (tmp, AA50, AA40);
AA41 = fmnaOp (tmp, AA51, AA41);
AA42 = fmnaOp (tmp, AA52, AA42);
AA43 = fmnaOp (tmp, AA53, AA43);
AA44 = fmnaOp (tmp, AA54, AA44);
AA45 = mulOp (negOp(tmp), AA55);
AA46 = fmnaOp (tmp, AA56, AA46);
AA47 = fmnaOp (tmp, AA57, AA47);
AA48 = fmnaOp (tmp, AA58, AA48);
tmp = AA65;
AA60 = fmnaOp (tmp, AA50, AA60);
AA61 = fmnaOp (tmp, AA51, AA61);
AA62 = fmnaOp (tmp, AA52, AA62);
AA63 = fmnaOp (tmp, AA53, AA63);
AA64 = fmnaOp (tmp, AA54, AA64);
AA65 = mulOp (negOp(tmp), AA55);
AA66 = fmnaOp (tmp, AA56, AA66);
AA67 = fmnaOp (tmp, AA57, AA67);
AA68 = fmnaOp (tmp, AA58, AA68);
tmp = AA75;
AA70 = fmnaOp (tmp, AA50, AA70);
AA71 = fmnaOp (tmp, AA51, AA71);
AA72 = fmnaOp (tmp, AA52, AA72);
AA73 = fmnaOp (tmp, AA53, AA73);
AA74 = fmnaOp (tmp, AA54, AA74);
AA75 = mulOp (negOp(tmp), AA55);
AA76 = fmnaOp (tmp, AA56, AA76);
AA77 = fmnaOp (tmp, AA57, AA77);
AA78 = fmnaOp (tmp, AA58, AA78);
tmp = AA85;
AA80 = fmnaOp (tmp, AA50, AA80);
AA81 = fmnaOp (tmp, AA51, AA81);
AA82 = fmnaOp (tmp, AA52, AA82);
AA83 = fmnaOp (tmp, AA53, AA83);
AA84 = fmnaOp (tmp, AA54, AA84);
AA85 = mulOp (negOp(tmp), AA55);
AA86 = fmnaOp (tmp, AA56, AA86);
AA87 = fmnaOp (tmp, AA57, AA87);
AA88 = fmnaOp (tmp, AA58, AA88);
/****************** iteration 6 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA66);
pvt = 6;
t = absOp (AA76);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA86);
if (t > p) { p = t; pvt = 8; }
/* swap pivot row with row 6 */
if (pvt == 7) {
tmp = AA60; AA60 = AA70; AA70 = tmp;
tmp = AA61; AA61 = AA71; AA71 = tmp;
tmp = AA62; AA62 = AA72; AA72 = tmp;
tmp = AA63; AA63 = AA73; AA73 = tmp;
tmp = AA64; AA64 = AA74; AA74 = tmp;
tmp = AA65; AA65 = AA75; AA75 = tmp;
tmp = AA66; AA66 = AA76; AA76 = tmp;
tmp = AA67; AA67 = AA77; AA77 = tmp;
tmp = AA68; AA68 = AA78; AA78 = tmp;
/* update permutation vector based on row swap */
i = perm6; perm6 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA60; AA60 = AA80; AA80 = tmp;
tmp = AA61; AA61 = AA81; AA81 = tmp;
tmp = AA62; AA62 = AA82; AA82 = tmp;
tmp = AA63; AA63 = AA83; AA83 = tmp;
tmp = AA64; AA64 = AA84; AA84 = tmp;
tmp = AA65; AA65 = AA85; AA85 = tmp;
tmp = AA66; AA66 = AA86; AA86 = tmp;
tmp = AA67; AA67 = AA87; AA87 = tmp;
tmp = AA68; AA68 = AA88; AA88 = tmp;
/* update permutation vector based on row swap */
i = perm6; perm6 = perm8; perm8 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA66);
icol6 = perm6;
AA60 = mulOp (tmp, AA60);
AA61 = mulOp (tmp, AA61);
AA62 = mulOp (tmp, AA62);
AA63 = mulOp (tmp, AA63);
AA64 = mulOp (tmp, AA64);
AA65 = mulOp (tmp, AA65);
AA66 = tmp;
AA67 = mulOp (tmp, AA67);
AA68 = mulOp (tmp, AA68);
/* eliminate above and below current row */
tmp = AA06;
AA00 = fmnaOp (tmp, AA60, AA00);
AA01 = fmnaOp (tmp, AA61, AA01);
AA02 = fmnaOp (tmp, AA62, AA02);
AA03 = fmnaOp (tmp, AA63, AA03);
AA04 = fmnaOp (tmp, AA64, AA04);
AA05 = fmnaOp (tmp, AA65, AA05);
AA06 = mulOp (negOp(tmp), AA66);
AA07 = fmnaOp (tmp, AA67, AA07);
AA08 = fmnaOp (tmp, AA68, AA08);
tmp = AA16;
AA10 = fmnaOp (tmp, AA60, AA10);
AA11 = fmnaOp (tmp, AA61, AA11);
AA12 = fmnaOp (tmp, AA62, AA12);
AA13 = fmnaOp (tmp, AA63, AA13);
AA14 = fmnaOp (tmp, AA64, AA14);
AA15 = fmnaOp (tmp, AA65, AA15);
AA16 = mulOp (negOp(tmp), AA66);
AA17 = fmnaOp (tmp, AA67, AA17);
AA18 = fmnaOp (tmp, AA68, AA18);
tmp = AA26;
AA20 = fmnaOp (tmp, AA60, AA20);
AA21 = fmnaOp (tmp, AA61, AA21);
AA22 = fmnaOp (tmp, AA62, AA22);
AA23 = fmnaOp (tmp, AA63, AA23);
AA24 = fmnaOp (tmp, AA64, AA24);
AA25 = fmnaOp (tmp, AA65, AA25);
AA26 = mulOp (negOp(tmp), AA66);
AA27 = fmnaOp (tmp, AA67, AA27);
AA28 = fmnaOp (tmp, AA68, AA28);
tmp = AA36;
AA30 = fmnaOp (tmp, AA60, AA30);
AA31 = fmnaOp (tmp, AA61, AA31);
AA32 = fmnaOp (tmp, AA62, AA32);
AA33 = fmnaOp (tmp, AA63, AA33);
AA34 = fmnaOp (tmp, AA64, AA34);
AA35 = fmnaOp (tmp, AA65, AA35);
AA36 = mulOp (negOp(tmp), AA66);
AA37 = fmnaOp (tmp, AA67, AA37);
AA38 = fmnaOp (tmp, AA68, AA38);
tmp = AA46;
AA40 = fmnaOp (tmp, AA60, AA40);
AA41 = fmnaOp (tmp, AA61, AA41);
AA42 = fmnaOp (tmp, AA62, AA42);
AA43 = fmnaOp (tmp, AA63, AA43);
AA44 = fmnaOp (tmp, AA64, AA44);
AA45 = fmnaOp (tmp, AA65, AA45);
AA46 = mulOp (negOp(tmp), AA66);
AA47 = fmnaOp (tmp, AA67, AA47);
AA48 = fmnaOp (tmp, AA68, AA48);
tmp = AA56;
AA50 = fmnaOp (tmp, AA60, AA50);
AA51 = fmnaOp (tmp, AA61, AA51);
AA52 = fmnaOp (tmp, AA62, AA52);
AA53 = fmnaOp (tmp, AA63, AA53);
AA54 = fmnaOp (tmp, AA64, AA54);
AA55 = fmnaOp (tmp, AA65, AA55);
AA56 = mulOp (negOp(tmp), AA66);
AA57 = fmnaOp (tmp, AA67, AA57);
AA58 = fmnaOp (tmp, AA68, AA58);
tmp = AA76;
AA70 = fmnaOp (tmp, AA60, AA70);
AA71 = fmnaOp (tmp, AA61, AA71);
AA72 = fmnaOp (tmp, AA62, AA72);
AA73 = fmnaOp (tmp, AA63, AA73);
AA74 = fmnaOp (tmp, AA64, AA74);
AA75 = fmnaOp (tmp, AA65, AA75);
AA76 = mulOp (negOp(tmp), AA66);
AA77 = fmnaOp (tmp, AA67, AA77);
AA78 = fmnaOp (tmp, AA68, AA78);
tmp = AA86;
AA80 = fmnaOp (tmp, AA60, AA80);
AA81 = fmnaOp (tmp, AA61, AA81);
AA82 = fmnaOp (tmp, AA62, AA82);
AA83 = fmnaOp (tmp, AA63, AA83);
AA84 = fmnaOp (tmp, AA64, AA84);
AA85 = fmnaOp (tmp, AA65, AA85);
AA86 = mulOp (negOp(tmp), AA66);
AA87 = fmnaOp (tmp, AA67, AA87);
AA88 = fmnaOp (tmp, AA68, AA88);
/****************** iteration 7 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA77);
pvt = 7;
t = absOp (AA87);
if (t > p) { p = t; pvt = 8; }
/* swap pivot row with row 7 */
if (pvt == 8) {
tmp = AA70; AA70 = AA80; AA80 = tmp;
tmp = AA71; AA71 = AA81; AA81 = tmp;
tmp = AA72; AA72 = AA82; AA82 = tmp;
tmp = AA73; AA73 = AA83; AA83 = tmp;
tmp = AA74; AA74 = AA84; AA84 = tmp;
tmp = AA75; AA75 = AA85; AA85 = tmp;
tmp = AA76; AA76 = AA86; AA86 = tmp;
tmp = AA77; AA77 = AA87; AA87 = tmp;
tmp = AA78; AA78 = AA88; AA88 = tmp;
/* update permutation vector based on row swap */
i = perm7; perm7 = perm8; perm8 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA77);
icol7 = perm7;
AA70 = mulOp (tmp, AA70);
AA71 = mulOp (tmp, AA71);
AA72 = mulOp (tmp, AA72);
AA73 = mulOp (tmp, AA73);
AA74 = mulOp (tmp, AA74);
AA75 = mulOp (tmp, AA75);
AA76 = mulOp (tmp, AA76);
AA77 = tmp;
AA78 = mulOp (tmp, AA78);
/* eliminate above and below current row */
tmp = AA07;
AA00 = fmnaOp (tmp, AA70, AA00);
AA01 = fmnaOp (tmp, AA71, AA01);
AA02 = fmnaOp (tmp, AA72, AA02);
AA03 = fmnaOp (tmp, AA73, AA03);
AA04 = fmnaOp (tmp, AA74, AA04);
AA05 = fmnaOp (tmp, AA75, AA05);
AA06 = fmnaOp (tmp, AA76, AA06);
AA07 = mulOp (negOp(tmp), AA77);
AA08 = fmnaOp (tmp, AA78, AA08);
tmp = AA17;
AA10 = fmnaOp (tmp, AA70, AA10);
AA11 = fmnaOp (tmp, AA71, AA11);
AA12 = fmnaOp (tmp, AA72, AA12);
AA13 = fmnaOp (tmp, AA73, AA13);
AA14 = fmnaOp (tmp, AA74, AA14);
AA15 = fmnaOp (tmp, AA75, AA15);
AA16 = fmnaOp (tmp, AA76, AA16);
AA17 = mulOp (negOp(tmp), AA77);
AA18 = fmnaOp (tmp, AA78, AA18);
tmp = AA27;
AA20 = fmnaOp (tmp, AA70, AA20);
AA21 = fmnaOp (tmp, AA71, AA21);
AA22 = fmnaOp (tmp, AA72, AA22);
AA23 = fmnaOp (tmp, AA73, AA23);
AA24 = fmnaOp (tmp, AA74, AA24);
AA25 = fmnaOp (tmp, AA75, AA25);
AA26 = fmnaOp (tmp, AA76, AA26);
AA27 = mulOp (negOp(tmp), AA77);
AA28 = fmnaOp (tmp, AA78, AA28);
tmp = AA37;
AA30 = fmnaOp (tmp, AA70, AA30);
AA31 = fmnaOp (tmp, AA71, AA31);
AA32 = fmnaOp (tmp, AA72, AA32);
AA33 = fmnaOp (tmp, AA73, AA33);
AA34 = fmnaOp (tmp, AA74, AA34);
AA35 = fmnaOp (tmp, AA75, AA35);
AA36 = fmnaOp (tmp, AA76, AA36);
AA37 = mulOp (negOp(tmp), AA77);
AA38 = fmnaOp (tmp, AA78, AA38);
tmp = AA47;
AA40 = fmnaOp (tmp, AA70, AA40);
AA41 = fmnaOp (tmp, AA71, AA41);
AA42 = fmnaOp (tmp, AA72, AA42);
AA43 = fmnaOp (tmp, AA73, AA43);
AA44 = fmnaOp (tmp, AA74, AA44);
AA45 = fmnaOp (tmp, AA75, AA45);
AA46 = fmnaOp (tmp, AA76, AA46);
AA47 = mulOp (negOp(tmp), AA77);
AA48 = fmnaOp (tmp, AA78, AA48);
tmp = AA57;
AA50 = fmnaOp (tmp, AA70, AA50);
AA51 = fmnaOp (tmp, AA71, AA51);
AA52 = fmnaOp (tmp, AA72, AA52);
AA53 = fmnaOp (tmp, AA73, AA53);
AA54 = fmnaOp (tmp, AA74, AA54);
AA55 = fmnaOp (tmp, AA75, AA55);
AA56 = fmnaOp (tmp, AA76, AA56);
AA57 = mulOp (negOp(tmp), AA77);
AA58 = fmnaOp (tmp, AA78, AA58);
tmp = AA67;
AA60 = fmnaOp (tmp, AA70, AA60);
AA61 = fmnaOp (tmp, AA71, AA61);
AA62 = fmnaOp (tmp, AA72, AA62);
AA63 = fmnaOp (tmp, AA73, AA63);
AA64 = fmnaOp (tmp, AA74, AA64);
AA65 = fmnaOp (tmp, AA75, AA65);
AA66 = fmnaOp (tmp, AA76, AA66);
AA67 = mulOp (negOp(tmp), AA77);
AA68 = fmnaOp (tmp, AA78, AA68);
tmp = AA87;
AA80 = fmnaOp (tmp, AA70, AA80);
AA81 = fmnaOp (tmp, AA71, AA81);
AA82 = fmnaOp (tmp, AA72, AA82);
AA83 = fmnaOp (tmp, AA73, AA83);
AA84 = fmnaOp (tmp, AA74, AA84);
AA85 = fmnaOp (tmp, AA75, AA85);
AA86 = fmnaOp (tmp, AA76, AA86);
AA87 = mulOp (negOp(tmp), AA77);
AA88 = fmnaOp (tmp, AA78, AA88);
/****************** iteration 8 ****************/
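        /* Last iteration: row 8 is the only remaining candidate, so no pivot
           search or row swap is needed even when USE_PIVOTING is enabled. */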
/* scale current row */
tmp = rcpOp (AA88);
icol8 = perm8;
AA80 = mulOp (tmp, AA80);
AA81 = mulOp (tmp, AA81);
AA82 = mulOp (tmp, AA82);
AA83 = mulOp (tmp, AA83);
AA84 = mulOp (tmp, AA84);
AA85 = mulOp (tmp, AA85);
AA86 = mulOp (tmp, AA86);
AA87 = mulOp (tmp, AA87);
AA88 = tmp;
/* eliminate above and below current row */
tmp = AA08;
AA00 = fmnaOp (tmp, AA80, AA00);
AA01 = fmnaOp (tmp, AA81, AA01);
AA02 = fmnaOp (tmp, AA82, AA02);
AA03 = fmnaOp (tmp, AA83, AA03);
AA04 = fmnaOp (tmp, AA84, AA04);
AA05 = fmnaOp (tmp, AA85, AA05);
AA06 = fmnaOp (tmp, AA86, AA06);
AA07 = fmnaOp (tmp, AA87, AA07);
AA08 = mulOp (negOp(tmp), AA88);
tmp = AA18;
AA10 = fmnaOp (tmp, AA80, AA10);
AA11 = fmnaOp (tmp, AA81, AA11);
AA12 = fmnaOp (tmp, AA82, AA12);
AA13 = fmnaOp (tmp, AA83, AA13);
AA14 = fmnaOp (tmp, AA84, AA14);
AA15 = fmnaOp (tmp, AA85, AA15);
AA16 = fmnaOp (tmp, AA86, AA16);
AA17 = fmnaOp (tmp, AA87, AA17);
AA18 = mulOp (negOp(tmp), AA88);
tmp = AA28;
AA20 = fmnaOp (tmp, AA80, AA20);
AA21 = fmnaOp (tmp, AA81, AA21);
AA22 = fmnaOp (tmp, AA82, AA22);
AA23 = fmnaOp (tmp, AA83, AA23);
AA24 = fmnaOp (tmp, AA84, AA24);
AA25 = fmnaOp (tmp, AA85, AA25);
AA26 = fmnaOp (tmp, AA86, AA26);
AA27 = fmnaOp (tmp, AA87, AA27);
AA28 = mulOp (negOp(tmp), AA88);
tmp = AA38;
AA30 = fmnaOp (tmp, AA80, AA30);
AA31 = fmnaOp (tmp, AA81, AA31);
AA32 = fmnaOp (tmp, AA82, AA32);
AA33 = fmnaOp (tmp, AA83, AA33);
AA34 = fmnaOp (tmp, AA84, AA34);
AA35 = fmnaOp (tmp, AA85, AA35);
AA36 = fmnaOp (tmp, AA86, AA36);
AA37 = fmnaOp (tmp, AA87, AA37);
AA38 = mulOp (negOp(tmp), AA88);
tmp = AA48;
AA40 = fmnaOp (tmp, AA80, AA40);
AA41 = fmnaOp (tmp, AA81, AA41);
AA42 = fmnaOp (tmp, AA82, AA42);
AA43 = fmnaOp (tmp, AA83, AA43);
AA44 = fmnaOp (tmp, AA84, AA44);
AA45 = fmnaOp (tmp, AA85, AA45);
AA46 = fmnaOp (tmp, AA86, AA46);
AA47 = fmnaOp (tmp, AA87, AA47);
AA48 = mulOp (negOp(tmp), AA88);
tmp = AA58;
AA50 = fmnaOp (tmp, AA80, AA50);
AA51 = fmnaOp (tmp, AA81, AA51);
AA52 = fmnaOp (tmp, AA82, AA52);
AA53 = fmnaOp (tmp, AA83, AA53);
AA54 = fmnaOp (tmp, AA84, AA54);
AA55 = fmnaOp (tmp, AA85, AA55);
AA56 = fmnaOp (tmp, AA86, AA56);
AA57 = fmnaOp (tmp, AA87, AA57);
AA58 = mulOp (negOp(tmp), AA88);
tmp = AA68;
AA60 = fmnaOp (tmp, AA80, AA60);
AA61 = fmnaOp (tmp, AA81, AA61);
AA62 = fmnaOp (tmp, AA82, AA62);
AA63 = fmnaOp (tmp, AA83, AA63);
AA64 = fmnaOp (tmp, AA84, AA64);
AA65 = fmnaOp (tmp, AA85, AA65);
AA66 = fmnaOp (tmp, AA86, AA66);
AA67 = fmnaOp (tmp, AA87, AA67);
AA68 = mulOp (negOp(tmp), AA88);
tmp = AA78;
AA70 = fmnaOp (tmp, AA80, AA70);
AA71 = fmnaOp (tmp, AA81, AA71);
AA72 = fmnaOp (tmp, AA82, AA72);
AA73 = fmnaOp (tmp, AA83, AA73);
AA74 = fmnaOp (tmp, AA84, AA74);
AA75 = fmnaOp (tmp, AA85, AA75);
AA76 = fmnaOp (tmp, AA86, AA76);
AA77 = fmnaOp (tmp, AA87, AA77);
AA78 = mulOp (negOp(tmp), AA88);
/* sort columns into the correct order */
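        /* icolN is the original index of the row that ended up in pivot position N;
           writing column N of the register result into column icolN of Ainv undoes
           the row interchanges performed during pivoting. */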
Ainv(0,icol0) = AA00;
Ainv(1,icol0) = AA10;
Ainv(2,icol0) = AA20;
Ainv(3,icol0) = AA30;
Ainv(4,icol0) = AA40;
Ainv(5,icol0) = AA50;
Ainv(6,icol0) = AA60;
Ainv(7,icol0) = AA70;
Ainv(8,icol0) = AA80;
Ainv(0,icol1) = AA01;
Ainv(1,icol1) = AA11;
Ainv(2,icol1) = AA21;
Ainv(3,icol1) = AA31;
Ainv(4,icol1) = AA41;
Ainv(5,icol1) = AA51;
Ainv(6,icol1) = AA61;
Ainv(7,icol1) = AA71;
Ainv(8,icol1) = AA81;
Ainv(0,icol2) = AA02;
Ainv(1,icol2) = AA12;
Ainv(2,icol2) = AA22;
Ainv(3,icol2) = AA32;
Ainv(4,icol2) = AA42;
Ainv(5,icol2) = AA52;
Ainv(6,icol2) = AA62;
Ainv(7,icol2) = AA72;
Ainv(8,icol2) = AA82;
Ainv(0,icol3) = AA03;
Ainv(1,icol3) = AA13;
Ainv(2,icol3) = AA23;
Ainv(3,icol3) = AA33;
Ainv(4,icol3) = AA43;
Ainv(5,icol3) = AA53;
Ainv(6,icol3) = AA63;
Ainv(7,icol3) = AA73;
Ainv(8,icol3) = AA83;
Ainv(0,icol4) = AA04;
Ainv(1,icol4) = AA14;
Ainv(2,icol4) = AA24;
Ainv(3,icol4) = AA34;
Ainv(4,icol4) = AA44;
Ainv(5,icol4) = AA54;
Ainv(6,icol4) = AA64;
Ainv(7,icol4) = AA74;
Ainv(8,icol4) = AA84;
Ainv(0,icol5) = AA05;
Ainv(1,icol5) = AA15;
Ainv(2,icol5) = AA25;
Ainv(3,icol5) = AA35;
Ainv(4,icol5) = AA45;
Ainv(5,icol5) = AA55;
Ainv(6,icol5) = AA65;
Ainv(7,icol5) = AA75;
Ainv(8,icol5) = AA85;
Ainv(0,icol6) = AA06;
Ainv(1,icol6) = AA16;
Ainv(2,icol6) = AA26;
Ainv(3,icol6) = AA36;
Ainv(4,icol6) = AA46;
Ainv(5,icol6) = AA56;
Ainv(6,icol6) = AA66;
Ainv(7,icol6) = AA76;
Ainv(8,icol6) = AA86;
Ainv(0,icol7) = AA07;
Ainv(1,icol7) = AA17;
Ainv(2,icol7) = AA27;
Ainv(3,icol7) = AA37;
Ainv(4,icol7) = AA47;
Ainv(5,icol7) = AA57;
Ainv(6,icol7) = AA67;
Ainv(7,icol7) = AA77;
Ainv(8,icol7) = AA87;
Ainv(0,icol8) = AA08;
Ainv(1,icol8) = AA18;
Ainv(2,icol8) = AA28;
Ainv(3,icol8) = AA38;
Ainv(4,icol8) = AA48;
Ainv(5,icol8) = AA58;
Ainv(6,icol8) = AA68;
Ainv(7,icol8) = AA78;
Ainv(8,icol8) = AA88;
}
}
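/*
 * Invert a batch of 10x10 matrices, one complete matrix per thread. Matrices
 * are stored back to back in column-major order in A; the inverses are written
 * to Ainv in the same layout. The elimination below is a fully unrolled
 * Gauss-Jordan sweep with optional partial pivoting (USE_PIVOTING).
 */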
template<typename T, int arch>
__global__ void matinv_10x10_matrix_per_thread (const T *A, T *Ainv, int batch)
{
    /* This is a hack. The instantiation of this template function fails when
       arch = ARCH_SM13 and T = hipDoubleComplex, since the generated code needs
       more than 16KB of local memory. Since we don't need an instance of this
       template function on either sm_13 or sm_20, simply compile out all code
       in the function when T = hipDoubleComplex.
    */
if (!isDoubleComplex<T>()) {
const int blkNum = blockIdx.y * gridDim.x + blockIdx.x;
const int thrdNum = blkNum * blockDim.x + threadIdx.x;
const int N = 10;
int perm0, perm1, perm2, perm3, perm4, perm5, perm6, perm7, perm8, perm9;
int icol0, icol1, icol2, icol3, icol4, icol5, icol6, icol7, icol8, icol9;
T AA00, AA01, AA02, AA03, AA04, AA05, AA06, AA07, AA08, AA09;
T AA10, AA11, AA12, AA13, AA14, AA15, AA16, AA17, AA18, AA19;
T AA20, AA21, AA22, AA23, AA24, AA25, AA26, AA27, AA28, AA29;
T AA30, AA31, AA32, AA33, AA34, AA35, AA36, AA37, AA38, AA39;
T AA40, AA41, AA42, AA43, AA44, AA45, AA46, AA47, AA48, AA49;
T AA50, AA51, AA52, AA53, AA54, AA55, AA56, AA57, AA58, AA59;
T AA60, AA61, AA62, AA63, AA64, AA65, AA66, AA67, AA68, AA69;
T AA70, AA71, AA72, AA73, AA74, AA75, AA76, AA77, AA78, AA79;
T AA80, AA81, AA82, AA83, AA84, AA85, AA86, AA87, AA88, AA89;
T AA90, AA91, AA92, AA93, AA94, AA95, AA96, AA97, AA98, AA99;
T tmp;
#if USE_PIVOTING
typename config<T,arch>::absValType t;
typename config<T,arch>::absValType p;
int i, pvt;
#endif
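    /* advance the input and output pointers to this thread's matrix in the batch */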
A += thrdNum * N * N;
Ainv += thrdNum * N * N;
if (thrdNum < batch) {
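        /* load the 10x10 matrix into registers, one column at a time */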
AA00 = A[0];
AA10 = A[1];
AA20 = A[2];
AA30 = A[3];
AA40 = A[4];
AA50 = A[5];
AA60 = A[6];
AA70 = A[7];
AA80 = A[8];
AA90 = A[9];
AA01 = A[10];
AA11 = A[11];
AA21 = A[12];
AA31 = A[13];
AA41 = A[14];
AA51 = A[15];
AA61 = A[16];
AA71 = A[17];
AA81 = A[18];
AA91 = A[19];
AA02 = A[20];
AA12 = A[21];
AA22 = A[22];
AA32 = A[23];
AA42 = A[24];
AA52 = A[25];
AA62 = A[26];
AA72 = A[27];
AA82 = A[28];
AA92 = A[29];
AA03 = A[30];
AA13 = A[31];
AA23 = A[32];
AA33 = A[33];
AA43 = A[34];
AA53 = A[35];
AA63 = A[36];
AA73 = A[37];
AA83 = A[38];
AA93 = A[39];
AA04 = A[40];
AA14 = A[41];
AA24 = A[42];
AA34 = A[43];
AA44 = A[44];
AA54 = A[45];
AA64 = A[46];
AA74 = A[47];
AA84 = A[48];
AA94 = A[49];
AA05 = A[50];
AA15 = A[51];
AA25 = A[52];
AA35 = A[53];
AA45 = A[54];
AA55 = A[55];
AA65 = A[56];
AA75 = A[57];
AA85 = A[58];
AA95 = A[59];
AA06 = A[60];
AA16 = A[61];
AA26 = A[62];
AA36 = A[63];
AA46 = A[64];
AA56 = A[65];
AA66 = A[66];
AA76 = A[67];
AA86 = A[68];
AA96 = A[69];
AA07 = A[70];
AA17 = A[71];
AA27 = A[72];
AA37 = A[73];
AA47 = A[74];
AA57 = A[75];
AA67 = A[76];
AA77 = A[77];
AA87 = A[78];
AA97 = A[79];
AA08 = A[80];
AA18 = A[81];
AA28 = A[82];
AA38 = A[83];
AA48 = A[84];
AA58 = A[85];
AA68 = A[86];
AA78 = A[87];
AA88 = A[88];
AA98 = A[89];
AA09 = A[90];
AA19 = A[91];
AA29 = A[92];
AA39 = A[93];
AA49 = A[94];
AA59 = A[95];
AA69 = A[96];
AA79 = A[97];
AA89 = A[98];
AA99 = A[99];
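        /* initialize the permutation vector to the identity */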
perm0 = 0;
perm1 = 1;
perm2 = 2;
perm3 = 3;
perm4 = 4;
perm5 = 5;
perm6 = 6;
perm7 = 7;
perm8 = 8;
perm9 = 9;
        /****************** iteration 0 ****************/
#if USE_PIVOTING
/* search pivot row */
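        /* partial pivoting: choose the row whose column-0 entry has the largest
           magnitude so the subsequent division is as well conditioned as possible */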
p = absOp (AA00);
pvt = 0;
t = absOp (AA10);
if (t > p) { p = t; pvt = 1; }
t = absOp (AA20);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA30);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA40);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA50);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA60);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA70);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA80);
if (t > p) { p = t; pvt = 8; }
t = absOp (AA90);
if (t > p) { p = t; pvt = 9; }
/* swap pivot row with row 0 */
if (pvt == 1) {
tmp = AA00; AA00 = AA10; AA10 = tmp;
tmp = AA01; AA01 = AA11; AA11 = tmp;
tmp = AA02; AA02 = AA12; AA12 = tmp;
tmp = AA03; AA03 = AA13; AA13 = tmp;
tmp = AA04; AA04 = AA14; AA14 = tmp;
tmp = AA05; AA05 = AA15; AA15 = tmp;
tmp = AA06; AA06 = AA16; AA16 = tmp;
tmp = AA07; AA07 = AA17; AA17 = tmp;
tmp = AA08; AA08 = AA18; AA18 = tmp;
tmp = AA09; AA09 = AA19; AA19 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm1; perm1 = i;
}
if (pvt == 2) {
tmp = AA00; AA00 = AA20; AA20 = tmp;
tmp = AA01; AA01 = AA21; AA21 = tmp;
tmp = AA02; AA02 = AA22; AA22 = tmp;
tmp = AA03; AA03 = AA23; AA23 = tmp;
tmp = AA04; AA04 = AA24; AA24 = tmp;
tmp = AA05; AA05 = AA25; AA25 = tmp;
tmp = AA06; AA06 = AA26; AA26 = tmp;
tmp = AA07; AA07 = AA27; AA27 = tmp;
tmp = AA08; AA08 = AA28; AA28 = tmp;
tmp = AA09; AA09 = AA29; AA29 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA00; AA00 = AA30; AA30 = tmp;
tmp = AA01; AA01 = AA31; AA31 = tmp;
tmp = AA02; AA02 = AA32; AA32 = tmp;
tmp = AA03; AA03 = AA33; AA33 = tmp;
tmp = AA04; AA04 = AA34; AA34 = tmp;
tmp = AA05; AA05 = AA35; AA35 = tmp;
tmp = AA06; AA06 = AA36; AA36 = tmp;
tmp = AA07; AA07 = AA37; AA37 = tmp;
tmp = AA08; AA08 = AA38; AA38 = tmp;
tmp = AA09; AA09 = AA39; AA39 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA00; AA00 = AA40; AA40 = tmp;
tmp = AA01; AA01 = AA41; AA41 = tmp;
tmp = AA02; AA02 = AA42; AA42 = tmp;
tmp = AA03; AA03 = AA43; AA43 = tmp;
tmp = AA04; AA04 = AA44; AA44 = tmp;
tmp = AA05; AA05 = AA45; AA45 = tmp;
tmp = AA06; AA06 = AA46; AA46 = tmp;
tmp = AA07; AA07 = AA47; AA47 = tmp;
tmp = AA08; AA08 = AA48; AA48 = tmp;
tmp = AA09; AA09 = AA49; AA49 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA00; AA00 = AA50; AA50 = tmp;
tmp = AA01; AA01 = AA51; AA51 = tmp;
tmp = AA02; AA02 = AA52; AA52 = tmp;
tmp = AA03; AA03 = AA53; AA53 = tmp;
tmp = AA04; AA04 = AA54; AA54 = tmp;
tmp = AA05; AA05 = AA55; AA55 = tmp;
tmp = AA06; AA06 = AA56; AA56 = tmp;
tmp = AA07; AA07 = AA57; AA57 = tmp;
tmp = AA08; AA08 = AA58; AA58 = tmp;
tmp = AA09; AA09 = AA59; AA59 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA00; AA00 = AA60; AA60 = tmp;
tmp = AA01; AA01 = AA61; AA61 = tmp;
tmp = AA02; AA02 = AA62; AA62 = tmp;
tmp = AA03; AA03 = AA63; AA63 = tmp;
tmp = AA04; AA04 = AA64; AA64 = tmp;
tmp = AA05; AA05 = AA65; AA65 = tmp;
tmp = AA06; AA06 = AA66; AA66 = tmp;
tmp = AA07; AA07 = AA67; AA67 = tmp;
tmp = AA08; AA08 = AA68; AA68 = tmp;
tmp = AA09; AA09 = AA69; AA69 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA00; AA00 = AA70; AA70 = tmp;
tmp = AA01; AA01 = AA71; AA71 = tmp;
tmp = AA02; AA02 = AA72; AA72 = tmp;
tmp = AA03; AA03 = AA73; AA73 = tmp;
tmp = AA04; AA04 = AA74; AA74 = tmp;
tmp = AA05; AA05 = AA75; AA75 = tmp;
tmp = AA06; AA06 = AA76; AA76 = tmp;
tmp = AA07; AA07 = AA77; AA77 = tmp;
tmp = AA08; AA08 = AA78; AA78 = tmp;
tmp = AA09; AA09 = AA79; AA79 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA00; AA00 = AA80; AA80 = tmp;
tmp = AA01; AA01 = AA81; AA81 = tmp;
tmp = AA02; AA02 = AA82; AA82 = tmp;
tmp = AA03; AA03 = AA83; AA83 = tmp;
tmp = AA04; AA04 = AA84; AA84 = tmp;
tmp = AA05; AA05 = AA85; AA85 = tmp;
tmp = AA06; AA06 = AA86; AA86 = tmp;
tmp = AA07; AA07 = AA87; AA87 = tmp;
tmp = AA08; AA08 = AA88; AA88 = tmp;
tmp = AA09; AA09 = AA89; AA89 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm8; perm8 = i;
}
if (pvt == 9) {
tmp = AA00; AA00 = AA90; AA90 = tmp;
tmp = AA01; AA01 = AA91; AA91 = tmp;
tmp = AA02; AA02 = AA92; AA92 = tmp;
tmp = AA03; AA03 = AA93; AA93 = tmp;
tmp = AA04; AA04 = AA94; AA94 = tmp;
tmp = AA05; AA05 = AA95; AA95 = tmp;
tmp = AA06; AA06 = AA96; AA96 = tmp;
tmp = AA07; AA07 = AA97; AA97 = tmp;
tmp = AA08; AA08 = AA98; AA98 = tmp;
tmp = AA09; AA09 = AA99; AA99 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm9; perm9 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA00);
icol0 = perm0;
AA00 = tmp;
AA01 = mulOp (tmp, AA01);
AA02 = mulOp (tmp, AA02);
AA03 = mulOp (tmp, AA03);
AA04 = mulOp (tmp, AA04);
AA05 = mulOp (tmp, AA05);
AA06 = mulOp (tmp, AA06);
AA07 = mulOp (tmp, AA07);
AA08 = mulOp (tmp, AA08);
AA09 = mulOp (tmp, AA09);
/* eliminate above and below current row */
tmp = AA10;
AA10 = mulOp (negOp(tmp), AA00);
AA11 = fmnaOp (tmp, AA01, AA11);
AA12 = fmnaOp (tmp, AA02, AA12);
AA13 = fmnaOp (tmp, AA03, AA13);
AA14 = fmnaOp (tmp, AA04, AA14);
AA15 = fmnaOp (tmp, AA05, AA15);
AA16 = fmnaOp (tmp, AA06, AA16);
AA17 = fmnaOp (tmp, AA07, AA17);
AA18 = fmnaOp (tmp, AA08, AA18);
AA19 = fmnaOp (tmp, AA09, AA19);
tmp = AA20;
AA20 = mulOp (negOp(tmp), AA00);
AA21 = fmnaOp (tmp, AA01, AA21);
AA22 = fmnaOp (tmp, AA02, AA22);
AA23 = fmnaOp (tmp, AA03, AA23);
AA24 = fmnaOp (tmp, AA04, AA24);
AA25 = fmnaOp (tmp, AA05, AA25);
AA26 = fmnaOp (tmp, AA06, AA26);
AA27 = fmnaOp (tmp, AA07, AA27);
AA28 = fmnaOp (tmp, AA08, AA28);
AA29 = fmnaOp (tmp, AA09, AA29);
tmp = AA30;
AA30 = mulOp (negOp(tmp), AA00);
AA31 = fmnaOp (tmp, AA01, AA31);
AA32 = fmnaOp (tmp, AA02, AA32);
AA33 = fmnaOp (tmp, AA03, AA33);
AA34 = fmnaOp (tmp, AA04, AA34);
AA35 = fmnaOp (tmp, AA05, AA35);
AA36 = fmnaOp (tmp, AA06, AA36);
AA37 = fmnaOp (tmp, AA07, AA37);
AA38 = fmnaOp (tmp, AA08, AA38);
AA39 = fmnaOp (tmp, AA09, AA39);
tmp = AA40;
AA40 = mulOp (negOp(tmp), AA00);
AA41 = fmnaOp (tmp, AA01, AA41);
AA42 = fmnaOp (tmp, AA02, AA42);
AA43 = fmnaOp (tmp, AA03, AA43);
AA44 = fmnaOp (tmp, AA04, AA44);
AA45 = fmnaOp (tmp, AA05, AA45);
AA46 = fmnaOp (tmp, AA06, AA46);
AA47 = fmnaOp (tmp, AA07, AA47);
AA48 = fmnaOp (tmp, AA08, AA48);
AA49 = fmnaOp (tmp, AA09, AA49);
tmp = AA50;
AA50 = mulOp (negOp(tmp), AA00);
AA51 = fmnaOp (tmp, AA01, AA51);
AA52 = fmnaOp (tmp, AA02, AA52);
AA53 = fmnaOp (tmp, AA03, AA53);
AA54 = fmnaOp (tmp, AA04, AA54);
AA55 = fmnaOp (tmp, AA05, AA55);
AA56 = fmnaOp (tmp, AA06, AA56);
AA57 = fmnaOp (tmp, AA07, AA57);
AA58 = fmnaOp (tmp, AA08, AA58);
AA59 = fmnaOp (tmp, AA09, AA59);
tmp = AA60;
AA60 = mulOp (negOp(tmp), AA00);
AA61 = fmnaOp (tmp, AA01, AA61);
AA62 = fmnaOp (tmp, AA02, AA62);
AA63 = fmnaOp (tmp, AA03, AA63);
AA64 = fmnaOp (tmp, AA04, AA64);
AA65 = fmnaOp (tmp, AA05, AA65);
AA66 = fmnaOp (tmp, AA06, AA66);
AA67 = fmnaOp (tmp, AA07, AA67);
AA68 = fmnaOp (tmp, AA08, AA68);
AA69 = fmnaOp (tmp, AA09, AA69);
tmp = AA70;
AA70 = mulOp (negOp(tmp), AA00);
AA71 = fmnaOp (tmp, AA01, AA71);
AA72 = fmnaOp (tmp, AA02, AA72);
AA73 = fmnaOp (tmp, AA03, AA73);
AA74 = fmnaOp (tmp, AA04, AA74);
AA75 = fmnaOp (tmp, AA05, AA75);
AA76 = fmnaOp (tmp, AA06, AA76);
AA77 = fmnaOp (tmp, AA07, AA77);
AA78 = fmnaOp (tmp, AA08, AA78);
AA79 = fmnaOp (tmp, AA09, AA79);
tmp = AA80;
AA80 = mulOp (negOp(tmp), AA00);
AA81 = fmnaOp (tmp, AA01, AA81);
AA82 = fmnaOp (tmp, AA02, AA82);
AA83 = fmnaOp (tmp, AA03, AA83);
AA84 = fmnaOp (tmp, AA04, AA84);
AA85 = fmnaOp (tmp, AA05, AA85);
AA86 = fmnaOp (tmp, AA06, AA86);
AA87 = fmnaOp (tmp, AA07, AA87);
AA88 = fmnaOp (tmp, AA08, AA88);
AA89 = fmnaOp (tmp, AA09, AA89);
tmp = AA90;
AA90 = mulOp (negOp(tmp), AA00);
AA91 = fmnaOp (tmp, AA01, AA91);
AA92 = fmnaOp (tmp, AA02, AA92);
AA93 = fmnaOp (tmp, AA03, AA93);
AA94 = fmnaOp (tmp, AA04, AA94);
AA95 = fmnaOp (tmp, AA05, AA95);
AA96 = fmnaOp (tmp, AA06, AA96);
AA97 = fmnaOp (tmp, AA07, AA97);
AA98 = fmnaOp (tmp, AA08, AA98);
AA99 = fmnaOp (tmp, AA09, AA99);
        /****************** iteration 1 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA11);
pvt = 1;
t = absOp (AA21);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA31);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA41);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA51);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA61);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA71);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA81);
if (t > p) { p = t; pvt = 8; }
t = absOp (AA91);
if (t > p) { p = t; pvt = 9; }
/* swap pivot row with row 1 */
if (pvt == 2) {
tmp = AA10; AA10 = AA20; AA20 = tmp;
tmp = AA11; AA11 = AA21; AA21 = tmp;
tmp = AA12; AA12 = AA22; AA22 = tmp;
tmp = AA13; AA13 = AA23; AA23 = tmp;
tmp = AA14; AA14 = AA24; AA24 = tmp;
tmp = AA15; AA15 = AA25; AA25 = tmp;
tmp = AA16; AA16 = AA26; AA26 = tmp;
tmp = AA17; AA17 = AA27; AA27 = tmp;
tmp = AA18; AA18 = AA28; AA28 = tmp;
tmp = AA19; AA19 = AA29; AA29 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA10; AA10 = AA30; AA30 = tmp;
tmp = AA11; AA11 = AA31; AA31 = tmp;
tmp = AA12; AA12 = AA32; AA32 = tmp;
tmp = AA13; AA13 = AA33; AA33 = tmp;
tmp = AA14; AA14 = AA34; AA34 = tmp;
tmp = AA15; AA15 = AA35; AA35 = tmp;
tmp = AA16; AA16 = AA36; AA36 = tmp;
tmp = AA17; AA17 = AA37; AA37 = tmp;
tmp = AA18; AA18 = AA38; AA38 = tmp;
tmp = AA19; AA19 = AA39; AA39 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA10; AA10 = AA40; AA40 = tmp;
tmp = AA11; AA11 = AA41; AA41 = tmp;
tmp = AA12; AA12 = AA42; AA42 = tmp;
tmp = AA13; AA13 = AA43; AA43 = tmp;
tmp = AA14; AA14 = AA44; AA44 = tmp;
tmp = AA15; AA15 = AA45; AA45 = tmp;
tmp = AA16; AA16 = AA46; AA46 = tmp;
tmp = AA17; AA17 = AA47; AA47 = tmp;
tmp = AA18; AA18 = AA48; AA48 = tmp;
tmp = AA19; AA19 = AA49; AA49 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA10; AA10 = AA50; AA50 = tmp;
tmp = AA11; AA11 = AA51; AA51 = tmp;
tmp = AA12; AA12 = AA52; AA52 = tmp;
tmp = AA13; AA13 = AA53; AA53 = tmp;
tmp = AA14; AA14 = AA54; AA54 = tmp;
tmp = AA15; AA15 = AA55; AA55 = tmp;
tmp = AA16; AA16 = AA56; AA56 = tmp;
tmp = AA17; AA17 = AA57; AA57 = tmp;
tmp = AA18; AA18 = AA58; AA58 = tmp;
tmp = AA19; AA19 = AA59; AA59 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA10; AA10 = AA60; AA60 = tmp;
tmp = AA11; AA11 = AA61; AA61 = tmp;
tmp = AA12; AA12 = AA62; AA62 = tmp;
tmp = AA13; AA13 = AA63; AA63 = tmp;
tmp = AA14; AA14 = AA64; AA64 = tmp;
tmp = AA15; AA15 = AA65; AA65 = tmp;
tmp = AA16; AA16 = AA66; AA66 = tmp;
tmp = AA17; AA17 = AA67; AA67 = tmp;
tmp = AA18; AA18 = AA68; AA68 = tmp;
tmp = AA19; AA19 = AA69; AA69 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA10; AA10 = AA70; AA70 = tmp;
tmp = AA11; AA11 = AA71; AA71 = tmp;
tmp = AA12; AA12 = AA72; AA72 = tmp;
tmp = AA13; AA13 = AA73; AA73 = tmp;
tmp = AA14; AA14 = AA74; AA74 = tmp;
tmp = AA15; AA15 = AA75; AA75 = tmp;
tmp = AA16; AA16 = AA76; AA76 = tmp;
tmp = AA17; AA17 = AA77; AA77 = tmp;
tmp = AA18; AA18 = AA78; AA78 = tmp;
tmp = AA19; AA19 = AA79; AA79 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA10; AA10 = AA80; AA80 = tmp;
tmp = AA11; AA11 = AA81; AA81 = tmp;
tmp = AA12; AA12 = AA82; AA82 = tmp;
tmp = AA13; AA13 = AA83; AA83 = tmp;
tmp = AA14; AA14 = AA84; AA84 = tmp;
tmp = AA15; AA15 = AA85; AA85 = tmp;
tmp = AA16; AA16 = AA86; AA86 = tmp;
tmp = AA17; AA17 = AA87; AA87 = tmp;
tmp = AA18; AA18 = AA88; AA88 = tmp;
tmp = AA19; AA19 = AA89; AA89 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm8; perm8 = i;
}
if (pvt == 9) {
tmp = AA10; AA10 = AA90; AA90 = tmp;
tmp = AA11; AA11 = AA91; AA91 = tmp;
tmp = AA12; AA12 = AA92; AA92 = tmp;
tmp = AA13; AA13 = AA93; AA93 = tmp;
tmp = AA14; AA14 = AA94; AA94 = tmp;
tmp = AA15; AA15 = AA95; AA95 = tmp;
tmp = AA16; AA16 = AA96; AA96 = tmp;
tmp = AA17; AA17 = AA97; AA97 = tmp;
tmp = AA18; AA18 = AA98; AA98 = tmp;
tmp = AA19; AA19 = AA99; AA99 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm9; perm9 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA11);
icol1 = perm1;
AA10 = mulOp (tmp, AA10);
AA11 = tmp;
AA12 = mulOp (tmp, AA12);
AA13 = mulOp (tmp, AA13);
AA14 = mulOp (tmp, AA14);
AA15 = mulOp (tmp, AA15);
AA16 = mulOp (tmp, AA16);
AA17 = mulOp (tmp, AA17);
AA18 = mulOp (tmp, AA18);
AA19 = mulOp (tmp, AA19);
/* eliminate above and below current row */
tmp = AA01;
AA00 = fmnaOp (tmp, AA10, AA00);
AA01 = mulOp (negOp(tmp), AA11);
AA02 = fmnaOp (tmp, AA12, AA02);
AA03 = fmnaOp (tmp, AA13, AA03);
AA04 = fmnaOp (tmp, AA14, AA04);
AA05 = fmnaOp (tmp, AA15, AA05);
AA06 = fmnaOp (tmp, AA16, AA06);
AA07 = fmnaOp (tmp, AA17, AA07);
AA08 = fmnaOp (tmp, AA18, AA08);
AA09 = fmnaOp (tmp, AA19, AA09);
tmp = AA21;
AA20 = fmnaOp (tmp, AA10, AA20);
AA21 = mulOp (negOp(tmp), AA11);
AA22 = fmnaOp (tmp, AA12, AA22);
AA23 = fmnaOp (tmp, AA13, AA23);
AA24 = fmnaOp (tmp, AA14, AA24);
AA25 = fmnaOp (tmp, AA15, AA25);
AA26 = fmnaOp (tmp, AA16, AA26);
AA27 = fmnaOp (tmp, AA17, AA27);
AA28 = fmnaOp (tmp, AA18, AA28);
AA29 = fmnaOp (tmp, AA19, AA29);
tmp = AA31;
AA30 = fmnaOp (tmp, AA10, AA30);
AA31 = mulOp (negOp(tmp), AA11);
AA32 = fmnaOp (tmp, AA12, AA32);
AA33 = fmnaOp (tmp, AA13, AA33);
AA34 = fmnaOp (tmp, AA14, AA34);
AA35 = fmnaOp (tmp, AA15, AA35);
AA36 = fmnaOp (tmp, AA16, AA36);
AA37 = fmnaOp (tmp, AA17, AA37);
AA38 = fmnaOp (tmp, AA18, AA38);
AA39 = fmnaOp (tmp, AA19, AA39);
tmp = AA41;
AA40 = fmnaOp (tmp, AA10, AA40);
AA41 = mulOp (negOp(tmp), AA11);
AA42 = fmnaOp (tmp, AA12, AA42);
AA43 = fmnaOp (tmp, AA13, AA43);
AA44 = fmnaOp (tmp, AA14, AA44);
AA45 = fmnaOp (tmp, AA15, AA45);
AA46 = fmnaOp (tmp, AA16, AA46);
AA47 = fmnaOp (tmp, AA17, AA47);
AA48 = fmnaOp (tmp, AA18, AA48);
AA49 = fmnaOp (tmp, AA19, AA49);
tmp = AA51;
AA50 = fmnaOp (tmp, AA10, AA50);
AA51 = mulOp (negOp(tmp), AA11);
AA52 = fmnaOp (tmp, AA12, AA52);
AA53 = fmnaOp (tmp, AA13, AA53);
AA54 = fmnaOp (tmp, AA14, AA54);
AA55 = fmnaOp (tmp, AA15, AA55);
AA56 = fmnaOp (tmp, AA16, AA56);
AA57 = fmnaOp (tmp, AA17, AA57);
AA58 = fmnaOp (tmp, AA18, AA58);
AA59 = fmnaOp (tmp, AA19, AA59);
tmp = AA61;
AA60 = fmnaOp (tmp, AA10, AA60);
AA61 = mulOp (negOp(tmp), AA11);
AA62 = fmnaOp (tmp, AA12, AA62);
AA63 = fmnaOp (tmp, AA13, AA63);
AA64 = fmnaOp (tmp, AA14, AA64);
AA65 = fmnaOp (tmp, AA15, AA65);
AA66 = fmnaOp (tmp, AA16, AA66);
AA67 = fmnaOp (tmp, AA17, AA67);
AA68 = fmnaOp (tmp, AA18, AA68);
AA69 = fmnaOp (tmp, AA19, AA69);
tmp = AA71;
AA70 = fmnaOp (tmp, AA10, AA70);
AA71 = mulOp (negOp(tmp), AA11);
AA72 = fmnaOp (tmp, AA12, AA72);
AA73 = fmnaOp (tmp, AA13, AA73);
AA74 = fmnaOp (tmp, AA14, AA74);
AA75 = fmnaOp (tmp, AA15, AA75);
AA76 = fmnaOp (tmp, AA16, AA76);
AA77 = fmnaOp (tmp, AA17, AA77);
AA78 = fmnaOp (tmp, AA18, AA78);
AA79 = fmnaOp (tmp, AA19, AA79);
tmp = AA81;
AA80 = fmnaOp (tmp, AA10, AA80);
AA81 = mulOp (negOp(tmp), AA11);
AA82 = fmnaOp (tmp, AA12, AA82);
AA83 = fmnaOp (tmp, AA13, AA83);
AA84 = fmnaOp (tmp, AA14, AA84);
AA85 = fmnaOp (tmp, AA15, AA85);
AA86 = fmnaOp (tmp, AA16, AA86);
AA87 = fmnaOp (tmp, AA17, AA87);
AA88 = fmnaOp (tmp, AA18, AA88);
AA89 = fmnaOp (tmp, AA19, AA89);
tmp = AA91;
AA90 = fmnaOp (tmp, AA10, AA90);
AA91 = mulOp (negOp(tmp), AA11);
AA92 = fmnaOp (tmp, AA12, AA92);
AA93 = fmnaOp (tmp, AA13, AA93);
AA94 = fmnaOp (tmp, AA14, AA94);
AA95 = fmnaOp (tmp, AA15, AA95);
AA96 = fmnaOp (tmp, AA16, AA96);
AA97 = fmnaOp (tmp, AA17, AA97);
AA98 = fmnaOp (tmp, AA18, AA98);
AA99 = fmnaOp (tmp, AA19, AA99);
/****************** iteration 2 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA22);
pvt = 2;
t = absOp (AA32);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA42);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA52);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA62);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA72);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA82);
if (t > p) { p = t; pvt = 8; }
t = absOp (AA92);
if (t > p) { p = t; pvt = 9; }
/* swap pivot row with row 2 */
if (pvt == 3) {
tmp = AA20; AA20 = AA30; AA30 = tmp;
tmp = AA21; AA21 = AA31; AA31 = tmp;
tmp = AA22; AA22 = AA32; AA32 = tmp;
tmp = AA23; AA23 = AA33; AA33 = tmp;
tmp = AA24; AA24 = AA34; AA34 = tmp;
tmp = AA25; AA25 = AA35; AA35 = tmp;
tmp = AA26; AA26 = AA36; AA36 = tmp;
tmp = AA27; AA27 = AA37; AA37 = tmp;
tmp = AA28; AA28 = AA38; AA38 = tmp;
tmp = AA29; AA29 = AA39; AA39 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA20; AA20 = AA40; AA40 = tmp;
tmp = AA21; AA21 = AA41; AA41 = tmp;
tmp = AA22; AA22 = AA42; AA42 = tmp;
tmp = AA23; AA23 = AA43; AA43 = tmp;
tmp = AA24; AA24 = AA44; AA44 = tmp;
tmp = AA25; AA25 = AA45; AA45 = tmp;
tmp = AA26; AA26 = AA46; AA46 = tmp;
tmp = AA27; AA27 = AA47; AA47 = tmp;
tmp = AA28; AA28 = AA48; AA48 = tmp;
tmp = AA29; AA29 = AA49; AA49 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA20; AA20 = AA50; AA50 = tmp;
tmp = AA21; AA21 = AA51; AA51 = tmp;
tmp = AA22; AA22 = AA52; AA52 = tmp;
tmp = AA23; AA23 = AA53; AA53 = tmp;
tmp = AA24; AA24 = AA54; AA54 = tmp;
tmp = AA25; AA25 = AA55; AA55 = tmp;
tmp = AA26; AA26 = AA56; AA56 = tmp;
tmp = AA27; AA27 = AA57; AA57 = tmp;
tmp = AA28; AA28 = AA58; AA58 = tmp;
tmp = AA29; AA29 = AA59; AA59 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA20; AA20 = AA60; AA60 = tmp;
tmp = AA21; AA21 = AA61; AA61 = tmp;
tmp = AA22; AA22 = AA62; AA62 = tmp;
tmp = AA23; AA23 = AA63; AA63 = tmp;
tmp = AA24; AA24 = AA64; AA64 = tmp;
tmp = AA25; AA25 = AA65; AA65 = tmp;
tmp = AA26; AA26 = AA66; AA66 = tmp;
tmp = AA27; AA27 = AA67; AA67 = tmp;
tmp = AA28; AA28 = AA68; AA68 = tmp;
tmp = AA29; AA29 = AA69; AA69 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA20; AA20 = AA70; AA70 = tmp;
tmp = AA21; AA21 = AA71; AA71 = tmp;
tmp = AA22; AA22 = AA72; AA72 = tmp;
tmp = AA23; AA23 = AA73; AA73 = tmp;
tmp = AA24; AA24 = AA74; AA74 = tmp;
tmp = AA25; AA25 = AA75; AA75 = tmp;
tmp = AA26; AA26 = AA76; AA76 = tmp;
tmp = AA27; AA27 = AA77; AA77 = tmp;
tmp = AA28; AA28 = AA78; AA78 = tmp;
tmp = AA29; AA29 = AA79; AA79 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA20; AA20 = AA80; AA80 = tmp;
tmp = AA21; AA21 = AA81; AA81 = tmp;
tmp = AA22; AA22 = AA82; AA82 = tmp;
tmp = AA23; AA23 = AA83; AA83 = tmp;
tmp = AA24; AA24 = AA84; AA84 = tmp;
tmp = AA25; AA25 = AA85; AA85 = tmp;
tmp = AA26; AA26 = AA86; AA86 = tmp;
tmp = AA27; AA27 = AA87; AA87 = tmp;
tmp = AA28; AA28 = AA88; AA88 = tmp;
tmp = AA29; AA29 = AA89; AA89 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm8; perm8 = i;
}
if (pvt == 9) {
tmp = AA20; AA20 = AA90; AA90 = tmp;
tmp = AA21; AA21 = AA91; AA91 = tmp;
tmp = AA22; AA22 = AA92; AA92 = tmp;
tmp = AA23; AA23 = AA93; AA93 = tmp;
tmp = AA24; AA24 = AA94; AA94 = tmp;
tmp = AA25; AA25 = AA95; AA95 = tmp;
tmp = AA26; AA26 = AA96; AA96 = tmp;
tmp = AA27; AA27 = AA97; AA97 = tmp;
tmp = AA28; AA28 = AA98; AA98 = tmp;
tmp = AA29; AA29 = AA99; AA99 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm9; perm9 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA22);
icol2 = perm2;
AA20 = mulOp (tmp, AA20);
AA21 = mulOp (tmp, AA21);
AA22 = tmp;
AA23 = mulOp (tmp, AA23);
AA24 = mulOp (tmp, AA24);
AA25 = mulOp (tmp, AA25);
AA26 = mulOp (tmp, AA26);
AA27 = mulOp (tmp, AA27);
AA28 = mulOp (tmp, AA28);
AA29 = mulOp (tmp, AA29);
/* eliminate above and below current row */
tmp = AA02;
AA00 = fmnaOp (tmp, AA20, AA00);
AA01 = fmnaOp (tmp, AA21, AA01);
AA02 = mulOp (negOp(tmp), AA22);
AA03 = fmnaOp (tmp, AA23, AA03);
AA04 = fmnaOp (tmp, AA24, AA04);
AA05 = fmnaOp (tmp, AA25, AA05);
AA06 = fmnaOp (tmp, AA26, AA06);
AA07 = fmnaOp (tmp, AA27, AA07);
AA08 = fmnaOp (tmp, AA28, AA08);
AA09 = fmnaOp (tmp, AA29, AA09);
tmp = AA12;
AA10 = fmnaOp (tmp, AA20, AA10);
AA11 = fmnaOp (tmp, AA21, AA11);
AA12 = mulOp (negOp(tmp), AA22);
AA13 = fmnaOp (tmp, AA23, AA13);
AA14 = fmnaOp (tmp, AA24, AA14);
AA15 = fmnaOp (tmp, AA25, AA15);
AA16 = fmnaOp (tmp, AA26, AA16);
AA17 = fmnaOp (tmp, AA27, AA17);
AA18 = fmnaOp (tmp, AA28, AA18);
AA19 = fmnaOp (tmp, AA29, AA19);
tmp = AA32;
AA30 = fmnaOp (tmp, AA20, AA30);
AA31 = fmnaOp (tmp, AA21, AA31);
AA32 = mulOp (negOp(tmp), AA22);
AA33 = fmnaOp (tmp, AA23, AA33);
AA34 = fmnaOp (tmp, AA24, AA34);
AA35 = fmnaOp (tmp, AA25, AA35);
AA36 = fmnaOp (tmp, AA26, AA36);
AA37 = fmnaOp (tmp, AA27, AA37);
AA38 = fmnaOp (tmp, AA28, AA38);
AA39 = fmnaOp (tmp, AA29, AA39);
tmp = AA42;
AA40 = fmnaOp (tmp, AA20, AA40);
AA41 = fmnaOp (tmp, AA21, AA41);
AA42 = mulOp (negOp(tmp), AA22);
AA43 = fmnaOp (tmp, AA23, AA43);
AA44 = fmnaOp (tmp, AA24, AA44);
AA45 = fmnaOp (tmp, AA25, AA45);
AA46 = fmnaOp (tmp, AA26, AA46);
AA47 = fmnaOp (tmp, AA27, AA47);
AA48 = fmnaOp (tmp, AA28, AA48);
AA49 = fmnaOp (tmp, AA29, AA49);
tmp = AA52;
AA50 = fmnaOp (tmp, AA20, AA50);
AA51 = fmnaOp (tmp, AA21, AA51);
AA52 = mulOp (negOp(tmp), AA22);
AA53 = fmnaOp (tmp, AA23, AA53);
AA54 = fmnaOp (tmp, AA24, AA54);
AA55 = fmnaOp (tmp, AA25, AA55);
AA56 = fmnaOp (tmp, AA26, AA56);
AA57 = fmnaOp (tmp, AA27, AA57);
AA58 = fmnaOp (tmp, AA28, AA58);
AA59 = fmnaOp (tmp, AA29, AA59);
tmp = AA62;
AA60 = fmnaOp (tmp, AA20, AA60);
AA61 = fmnaOp (tmp, AA21, AA61);
AA62 = mulOp (negOp(tmp), AA22);
AA63 = fmnaOp (tmp, AA23, AA63);
AA64 = fmnaOp (tmp, AA24, AA64);
AA65 = fmnaOp (tmp, AA25, AA65);
AA66 = fmnaOp (tmp, AA26, AA66);
AA67 = fmnaOp (tmp, AA27, AA67);
AA68 = fmnaOp (tmp, AA28, AA68);
AA69 = fmnaOp (tmp, AA29, AA69);
tmp = AA72;
AA70 = fmnaOp (tmp, AA20, AA70);
AA71 = fmnaOp (tmp, AA21, AA71);
AA72 = mulOp (negOp(tmp), AA22);
AA73 = fmnaOp (tmp, AA23, AA73);
AA74 = fmnaOp (tmp, AA24, AA74);
AA75 = fmnaOp (tmp, AA25, AA75);
AA76 = fmnaOp (tmp, AA26, AA76);
AA77 = fmnaOp (tmp, AA27, AA77);
AA78 = fmnaOp (tmp, AA28, AA78);
AA79 = fmnaOp (tmp, AA29, AA79);
tmp = AA82;
AA80 = fmnaOp (tmp, AA20, AA80);
AA81 = fmnaOp (tmp, AA21, AA81);
AA82 = mulOp (negOp(tmp), AA22);
AA83 = fmnaOp (tmp, AA23, AA83);
AA84 = fmnaOp (tmp, AA24, AA84);
AA85 = fmnaOp (tmp, AA25, AA85);
AA86 = fmnaOp (tmp, AA26, AA86);
AA87 = fmnaOp (tmp, AA27, AA87);
AA88 = fmnaOp (tmp, AA28, AA88);
AA89 = fmnaOp (tmp, AA29, AA89);
tmp = AA92;
AA90 = fmnaOp (tmp, AA20, AA90);
AA91 = fmnaOp (tmp, AA21, AA91);
AA92 = mulOp (negOp(tmp), AA22);
AA93 = fmnaOp (tmp, AA23, AA93);
AA94 = fmnaOp (tmp, AA24, AA94);
AA95 = fmnaOp (tmp, AA25, AA95);
AA96 = fmnaOp (tmp, AA26, AA96);
AA97 = fmnaOp (tmp, AA27, AA97);
AA98 = fmnaOp (tmp, AA28, AA98);
AA99 = fmnaOp (tmp, AA29, AA99);
/****************** iteration 3 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA33);
pvt = 3;
t = absOp (AA43);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA53);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA63);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA73);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA83);
if (t > p) { p = t; pvt = 8; }
t = absOp (AA93);
if (t > p) { p = t; pvt = 9; }
/* swap pivot row with row 3 */
if (pvt == 4) {
tmp = AA30; AA30 = AA40; AA40 = tmp;
tmp = AA31; AA31 = AA41; AA41 = tmp;
tmp = AA32; AA32 = AA42; AA42 = tmp;
tmp = AA33; AA33 = AA43; AA43 = tmp;
tmp = AA34; AA34 = AA44; AA44 = tmp;
tmp = AA35; AA35 = AA45; AA45 = tmp;
tmp = AA36; AA36 = AA46; AA46 = tmp;
tmp = AA37; AA37 = AA47; AA47 = tmp;
tmp = AA38; AA38 = AA48; AA48 = tmp;
tmp = AA39; AA39 = AA49; AA49 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA30; AA30 = AA50; AA50 = tmp;
tmp = AA31; AA31 = AA51; AA51 = tmp;
tmp = AA32; AA32 = AA52; AA52 = tmp;
tmp = AA33; AA33 = AA53; AA53 = tmp;
tmp = AA34; AA34 = AA54; AA54 = tmp;
tmp = AA35; AA35 = AA55; AA55 = tmp;
tmp = AA36; AA36 = AA56; AA56 = tmp;
tmp = AA37; AA37 = AA57; AA57 = tmp;
tmp = AA38; AA38 = AA58; AA58 = tmp;
tmp = AA39; AA39 = AA59; AA59 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA30; AA30 = AA60; AA60 = tmp;
tmp = AA31; AA31 = AA61; AA61 = tmp;
tmp = AA32; AA32 = AA62; AA62 = tmp;
tmp = AA33; AA33 = AA63; AA63 = tmp;
tmp = AA34; AA34 = AA64; AA64 = tmp;
tmp = AA35; AA35 = AA65; AA65 = tmp;
tmp = AA36; AA36 = AA66; AA66 = tmp;
tmp = AA37; AA37 = AA67; AA67 = tmp;
tmp = AA38; AA38 = AA68; AA68 = tmp;
tmp = AA39; AA39 = AA69; AA69 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA30; AA30 = AA70; AA70 = tmp;
tmp = AA31; AA31 = AA71; AA71 = tmp;
tmp = AA32; AA32 = AA72; AA72 = tmp;
tmp = AA33; AA33 = AA73; AA73 = tmp;
tmp = AA34; AA34 = AA74; AA74 = tmp;
tmp = AA35; AA35 = AA75; AA75 = tmp;
tmp = AA36; AA36 = AA76; AA76 = tmp;
tmp = AA37; AA37 = AA77; AA77 = tmp;
tmp = AA38; AA38 = AA78; AA78 = tmp;
tmp = AA39; AA39 = AA79; AA79 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA30; AA30 = AA80; AA80 = tmp;
tmp = AA31; AA31 = AA81; AA81 = tmp;
tmp = AA32; AA32 = AA82; AA82 = tmp;
tmp = AA33; AA33 = AA83; AA83 = tmp;
tmp = AA34; AA34 = AA84; AA84 = tmp;
tmp = AA35; AA35 = AA85; AA85 = tmp;
tmp = AA36; AA36 = AA86; AA86 = tmp;
tmp = AA37; AA37 = AA87; AA87 = tmp;
tmp = AA38; AA38 = AA88; AA88 = tmp;
tmp = AA39; AA39 = AA89; AA89 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm8; perm8 = i;
}
if (pvt == 9) {
tmp = AA30; AA30 = AA90; AA90 = tmp;
tmp = AA31; AA31 = AA91; AA91 = tmp;
tmp = AA32; AA32 = AA92; AA92 = tmp;
tmp = AA33; AA33 = AA93; AA93 = tmp;
tmp = AA34; AA34 = AA94; AA94 = tmp;
tmp = AA35; AA35 = AA95; AA95 = tmp;
tmp = AA36; AA36 = AA96; AA96 = tmp;
tmp = AA37; AA37 = AA97; AA97 = tmp;
tmp = AA38; AA38 = AA98; AA98 = tmp;
tmp = AA39; AA39 = AA99; AA99 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm9; perm9 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA33);
icol3 = perm3;
AA30 = mulOp (tmp, AA30);
AA31 = mulOp (tmp, AA31);
AA32 = mulOp (tmp, AA32);
AA33 = tmp;
AA34 = mulOp (tmp, AA34);
AA35 = mulOp (tmp, AA35);
AA36 = mulOp (tmp, AA36);
AA37 = mulOp (tmp, AA37);
AA38 = mulOp (tmp, AA38);
AA39 = mulOp (tmp, AA39);
/* eliminate above and below current row */
tmp = AA03;
AA00 = fmnaOp (tmp, AA30, AA00);
AA01 = fmnaOp (tmp, AA31, AA01);
AA02 = fmnaOp (tmp, AA32, AA02);
AA03 = mulOp (negOp(tmp), AA33);
AA04 = fmnaOp (tmp, AA34, AA04);
AA05 = fmnaOp (tmp, AA35, AA05);
AA06 = fmnaOp (tmp, AA36, AA06);
AA07 = fmnaOp (tmp, AA37, AA07);
AA08 = fmnaOp (tmp, AA38, AA08);
AA09 = fmnaOp (tmp, AA39, AA09);
tmp = AA13;
AA10 = fmnaOp (tmp, AA30, AA10);
AA11 = fmnaOp (tmp, AA31, AA11);
AA12 = fmnaOp (tmp, AA32, AA12);
AA13 = mulOp (negOp(tmp), AA33);
AA14 = fmnaOp (tmp, AA34, AA14);
AA15 = fmnaOp (tmp, AA35, AA15);
AA16 = fmnaOp (tmp, AA36, AA16);
AA17 = fmnaOp (tmp, AA37, AA17);
AA18 = fmnaOp (tmp, AA38, AA18);
AA19 = fmnaOp (tmp, AA39, AA19);
tmp = AA23;
AA20 = fmnaOp (tmp, AA30, AA20);
AA21 = fmnaOp (tmp, AA31, AA21);
AA22 = fmnaOp (tmp, AA32, AA22);
AA23 = mulOp (negOp(tmp), AA33);
AA24 = fmnaOp (tmp, AA34, AA24);
AA25 = fmnaOp (tmp, AA35, AA25);
AA26 = fmnaOp (tmp, AA36, AA26);
AA27 = fmnaOp (tmp, AA37, AA27);
AA28 = fmnaOp (tmp, AA38, AA28);
AA29 = fmnaOp (tmp, AA39, AA29);
tmp = AA43;
AA40 = fmnaOp (tmp, AA30, AA40);
AA41 = fmnaOp (tmp, AA31, AA41);
AA42 = fmnaOp (tmp, AA32, AA42);
AA43 = mulOp (negOp(tmp), AA33);
AA44 = fmnaOp (tmp, AA34, AA44);
AA45 = fmnaOp (tmp, AA35, AA45);
AA46 = fmnaOp (tmp, AA36, AA46);
AA47 = fmnaOp (tmp, AA37, AA47);
AA48 = fmnaOp (tmp, AA38, AA48);
AA49 = fmnaOp (tmp, AA39, AA49);
tmp = AA53;
AA50 = fmnaOp (tmp, AA30, AA50);
AA51 = fmnaOp (tmp, AA31, AA51);
AA52 = fmnaOp (tmp, AA32, AA52);
AA53 = mulOp (negOp(tmp), AA33);
AA54 = fmnaOp (tmp, AA34, AA54);
AA55 = fmnaOp (tmp, AA35, AA55);
AA56 = fmnaOp (tmp, AA36, AA56);
AA57 = fmnaOp (tmp, AA37, AA57);
AA58 = fmnaOp (tmp, AA38, AA58);
AA59 = fmnaOp (tmp, AA39, AA59);
tmp = AA63;
AA60 = fmnaOp (tmp, AA30, AA60);
AA61 = fmnaOp (tmp, AA31, AA61);
AA62 = fmnaOp (tmp, AA32, AA62);
AA63 = mulOp (negOp(tmp), AA33);
AA64 = fmnaOp (tmp, AA34, AA64);
AA65 = fmnaOp (tmp, AA35, AA65);
AA66 = fmnaOp (tmp, AA36, AA66);
AA67 = fmnaOp (tmp, AA37, AA67);
AA68 = fmnaOp (tmp, AA38, AA68);
AA69 = fmnaOp (tmp, AA39, AA69);
tmp = AA73;
AA70 = fmnaOp (tmp, AA30, AA70);
AA71 = fmnaOp (tmp, AA31, AA71);
AA72 = fmnaOp (tmp, AA32, AA72);
AA73 = mulOp (negOp(tmp), AA33);
AA74 = fmnaOp (tmp, AA34, AA74);
AA75 = fmnaOp (tmp, AA35, AA75);
AA76 = fmnaOp (tmp, AA36, AA76);
AA77 = fmnaOp (tmp, AA37, AA77);
AA78 = fmnaOp (tmp, AA38, AA78);
AA79 = fmnaOp (tmp, AA39, AA79);
tmp = AA83;
AA80 = fmnaOp (tmp, AA30, AA80);
AA81 = fmnaOp (tmp, AA31, AA81);
AA82 = fmnaOp (tmp, AA32, AA82);
AA83 = mulOp (negOp(tmp), AA33);
AA84 = fmnaOp (tmp, AA34, AA84);
AA85 = fmnaOp (tmp, AA35, AA85);
AA86 = fmnaOp (tmp, AA36, AA86);
AA87 = fmnaOp (tmp, AA37, AA87);
AA88 = fmnaOp (tmp, AA38, AA88);
AA89 = fmnaOp (tmp, AA39, AA89);
tmp = AA93;
AA90 = fmnaOp (tmp, AA30, AA90);
AA91 = fmnaOp (tmp, AA31, AA91);
AA92 = fmnaOp (tmp, AA32, AA92);
AA93 = mulOp (negOp(tmp), AA33);
AA94 = fmnaOp (tmp, AA34, AA94);
AA95 = fmnaOp (tmp, AA35, AA95);
AA96 = fmnaOp (tmp, AA36, AA96);
AA97 = fmnaOp (tmp, AA37, AA97);
AA98 = fmnaOp (tmp, AA38, AA98);
AA99 = fmnaOp (tmp, AA39, AA99);
/****************** iteration 4 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA44);
pvt = 4;
t = absOp (AA54);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA64);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA74);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA84);
if (t > p) { p = t; pvt = 8; }
t = absOp (AA94);
if (t > p) { p = t; pvt = 9; }
/* swap pivot row with row 4 */
if (pvt == 5) {
tmp = AA40; AA40 = AA50; AA50 = tmp;
tmp = AA41; AA41 = AA51; AA51 = tmp;
tmp = AA42; AA42 = AA52; AA52 = tmp;
tmp = AA43; AA43 = AA53; AA53 = tmp;
tmp = AA44; AA44 = AA54; AA54 = tmp;
tmp = AA45; AA45 = AA55; AA55 = tmp;
tmp = AA46; AA46 = AA56; AA56 = tmp;
tmp = AA47; AA47 = AA57; AA57 = tmp;
tmp = AA48; AA48 = AA58; AA58 = tmp;
tmp = AA49; AA49 = AA59; AA59 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA40; AA40 = AA60; AA60 = tmp;
tmp = AA41; AA41 = AA61; AA61 = tmp;
tmp = AA42; AA42 = AA62; AA62 = tmp;
tmp = AA43; AA43 = AA63; AA63 = tmp;
tmp = AA44; AA44 = AA64; AA64 = tmp;
tmp = AA45; AA45 = AA65; AA65 = tmp;
tmp = AA46; AA46 = AA66; AA66 = tmp;
tmp = AA47; AA47 = AA67; AA67 = tmp;
tmp = AA48; AA48 = AA68; AA68 = tmp;
tmp = AA49; AA49 = AA69; AA69 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA40; AA40 = AA70; AA70 = tmp;
tmp = AA41; AA41 = AA71; AA71 = tmp;
tmp = AA42; AA42 = AA72; AA72 = tmp;
tmp = AA43; AA43 = AA73; AA73 = tmp;
tmp = AA44; AA44 = AA74; AA74 = tmp;
tmp = AA45; AA45 = AA75; AA75 = tmp;
tmp = AA46; AA46 = AA76; AA76 = tmp;
tmp = AA47; AA47 = AA77; AA77 = tmp;
tmp = AA48; AA48 = AA78; AA78 = tmp;
tmp = AA49; AA49 = AA79; AA79 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA40; AA40 = AA80; AA80 = tmp;
tmp = AA41; AA41 = AA81; AA81 = tmp;
tmp = AA42; AA42 = AA82; AA82 = tmp;
tmp = AA43; AA43 = AA83; AA83 = tmp;
tmp = AA44; AA44 = AA84; AA84 = tmp;
tmp = AA45; AA45 = AA85; AA85 = tmp;
tmp = AA46; AA46 = AA86; AA86 = tmp;
tmp = AA47; AA47 = AA87; AA87 = tmp;
tmp = AA48; AA48 = AA88; AA88 = tmp;
tmp = AA49; AA49 = AA89; AA89 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm8; perm8 = i;
}
if (pvt == 9) {
tmp = AA40; AA40 = AA90; AA90 = tmp;
tmp = AA41; AA41 = AA91; AA91 = tmp;
tmp = AA42; AA42 = AA92; AA92 = tmp;
tmp = AA43; AA43 = AA93; AA93 = tmp;
tmp = AA44; AA44 = AA94; AA94 = tmp;
tmp = AA45; AA45 = AA95; AA95 = tmp;
tmp = AA46; AA46 = AA96; AA96 = tmp;
tmp = AA47; AA47 = AA97; AA97 = tmp;
tmp = AA48; AA48 = AA98; AA98 = tmp;
tmp = AA49; AA49 = AA99; AA99 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm9; perm9 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA44);
icol4 = perm4;
AA40 = mulOp (tmp, AA40);
AA41 = mulOp (tmp, AA41);
AA42 = mulOp (tmp, AA42);
AA43 = mulOp (tmp, AA43);
AA44 = tmp;
AA45 = mulOp (tmp, AA45);
AA46 = mulOp (tmp, AA46);
AA47 = mulOp (tmp, AA47);
AA48 = mulOp (tmp, AA48);
AA49 = mulOp (tmp, AA49);
/* eliminate above and below current row */
tmp = AA04;
AA00 = fmnaOp (tmp, AA40, AA00);
AA01 = fmnaOp (tmp, AA41, AA01);
AA02 = fmnaOp (tmp, AA42, AA02);
AA03 = fmnaOp (tmp, AA43, AA03);
AA04 = mulOp (negOp(tmp), AA44);
AA05 = fmnaOp (tmp, AA45, AA05);
AA06 = fmnaOp (tmp, AA46, AA06);
AA07 = fmnaOp (tmp, AA47, AA07);
AA08 = fmnaOp (tmp, AA48, AA08);
AA09 = fmnaOp (tmp, AA49, AA09);
tmp = AA14;
AA10 = fmnaOp (tmp, AA40, AA10);
AA11 = fmnaOp (tmp, AA41, AA11);
AA12 = fmnaOp (tmp, AA42, AA12);
AA13 = fmnaOp (tmp, AA43, AA13);
AA14 = mulOp (negOp(tmp), AA44);
AA15 = fmnaOp (tmp, AA45, AA15);
AA16 = fmnaOp (tmp, AA46, AA16);
AA17 = fmnaOp (tmp, AA47, AA17);
AA18 = fmnaOp (tmp, AA48, AA18);
AA19 = fmnaOp (tmp, AA49, AA19);
tmp = AA24;
AA20 = fmnaOp (tmp, AA40, AA20);
AA21 = fmnaOp (tmp, AA41, AA21);
AA22 = fmnaOp (tmp, AA42, AA22);
AA23 = fmnaOp (tmp, AA43, AA23);
AA24 = mulOp (negOp(tmp), AA44);
AA25 = fmnaOp (tmp, AA45, AA25);
AA26 = fmnaOp (tmp, AA46, AA26);
AA27 = fmnaOp (tmp, AA47, AA27);
AA28 = fmnaOp (tmp, AA48, AA28);
AA29 = fmnaOp (tmp, AA49, AA29);
tmp = AA34;
AA30 = fmnaOp (tmp, AA40, AA30);
AA31 = fmnaOp (tmp, AA41, AA31);
AA32 = fmnaOp (tmp, AA42, AA32);
AA33 = fmnaOp (tmp, AA43, AA33);
AA34 = mulOp (negOp(tmp), AA44);
AA35 = fmnaOp (tmp, AA45, AA35);
AA36 = fmnaOp (tmp, AA46, AA36);
AA37 = fmnaOp (tmp, AA47, AA37);
AA38 = fmnaOp (tmp, AA48, AA38);
AA39 = fmnaOp (tmp, AA49, AA39);
tmp = AA54;
AA50 = fmnaOp (tmp, AA40, AA50);
AA51 = fmnaOp (tmp, AA41, AA51);
AA52 = fmnaOp (tmp, AA42, AA52);
AA53 = fmnaOp (tmp, AA43, AA53);
AA54 = mulOp (negOp(tmp), AA44);
AA55 = fmnaOp (tmp, AA45, AA55);
AA56 = fmnaOp (tmp, AA46, AA56);
AA57 = fmnaOp (tmp, AA47, AA57);
AA58 = fmnaOp (tmp, AA48, AA58);
AA59 = fmnaOp (tmp, AA49, AA59);
tmp = AA64;
AA60 = fmnaOp (tmp, AA40, AA60);
AA61 = fmnaOp (tmp, AA41, AA61);
AA62 = fmnaOp (tmp, AA42, AA62);
AA63 = fmnaOp (tmp, AA43, AA63);
AA64 = mulOp (negOp(tmp), AA44);
AA65 = fmnaOp (tmp, AA45, AA65);
AA66 = fmnaOp (tmp, AA46, AA66);
AA67 = fmnaOp (tmp, AA47, AA67);
AA68 = fmnaOp (tmp, AA48, AA68);
AA69 = fmnaOp (tmp, AA49, AA69);
tmp = AA74;
AA70 = fmnaOp (tmp, AA40, AA70);
AA71 = fmnaOp (tmp, AA41, AA71);
AA72 = fmnaOp (tmp, AA42, AA72);
AA73 = fmnaOp (tmp, AA43, AA73);
AA74 = mulOp (negOp(tmp), AA44);
AA75 = fmnaOp (tmp, AA45, AA75);
AA76 = fmnaOp (tmp, AA46, AA76);
AA77 = fmnaOp (tmp, AA47, AA77);
AA78 = fmnaOp (tmp, AA48, AA78);
AA79 = fmnaOp (tmp, AA49, AA79);
tmp = AA84;
AA80 = fmnaOp (tmp, AA40, AA80);
AA81 = fmnaOp (tmp, AA41, AA81);
AA82 = fmnaOp (tmp, AA42, AA82);
AA83 = fmnaOp (tmp, AA43, AA83);
AA84 = mulOp (negOp(tmp), AA44);
AA85 = fmnaOp (tmp, AA45, AA85);
AA86 = fmnaOp (tmp, AA46, AA86);
AA87 = fmnaOp (tmp, AA47, AA87);
AA88 = fmnaOp (tmp, AA48, AA88);
AA89 = fmnaOp (tmp, AA49, AA89);
tmp = AA94;
AA90 = fmnaOp (tmp, AA40, AA90);
AA91 = fmnaOp (tmp, AA41, AA91);
AA92 = fmnaOp (tmp, AA42, AA92);
AA93 = fmnaOp (tmp, AA43, AA93);
AA94 = mulOp (negOp(tmp), AA44);
AA95 = fmnaOp (tmp, AA45, AA95);
AA96 = fmnaOp (tmp, AA46, AA96);
AA97 = fmnaOp (tmp, AA47, AA97);
AA98 = fmnaOp (tmp, AA48, AA98);
AA99 = fmnaOp (tmp, AA49, AA99);
/****************** iteration 5 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA55);
pvt = 5;
t = absOp (AA65);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA75);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA85);
if (t > p) { p = t; pvt = 8; }
t = absOp (AA95);
if (t > p) { p = t; pvt = 9; }
/* swap pivot row with row 5 */
if (pvt == 6) {
tmp = AA50; AA50 = AA60; AA60 = tmp;
tmp = AA51; AA51 = AA61; AA61 = tmp;
tmp = AA52; AA52 = AA62; AA62 = tmp;
tmp = AA53; AA53 = AA63; AA63 = tmp;
tmp = AA54; AA54 = AA64; AA64 = tmp;
tmp = AA55; AA55 = AA65; AA65 = tmp;
tmp = AA56; AA56 = AA66; AA66 = tmp;
tmp = AA57; AA57 = AA67; AA67 = tmp;
tmp = AA58; AA58 = AA68; AA68 = tmp;
tmp = AA59; AA59 = AA69; AA69 = tmp;
/* update permutation vector based on row swap */
i = perm5; perm5 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA50; AA50 = AA70; AA70 = tmp;
tmp = AA51; AA51 = AA71; AA71 = tmp;
tmp = AA52; AA52 = AA72; AA72 = tmp;
tmp = AA53; AA53 = AA73; AA73 = tmp;
tmp = AA54; AA54 = AA74; AA74 = tmp;
tmp = AA55; AA55 = AA75; AA75 = tmp;
tmp = AA56; AA56 = AA76; AA76 = tmp;
tmp = AA57; AA57 = AA77; AA77 = tmp;
tmp = AA58; AA58 = AA78; AA78 = tmp;
tmp = AA59; AA59 = AA79; AA79 = tmp;
/* update permutation vector based on row swap */
i = perm5; perm5 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA50; AA50 = AA80; AA80 = tmp;
tmp = AA51; AA51 = AA81; AA81 = tmp;
tmp = AA52; AA52 = AA82; AA82 = tmp;
tmp = AA53; AA53 = AA83; AA83 = tmp;
tmp = AA54; AA54 = AA84; AA84 = tmp;
tmp = AA55; AA55 = AA85; AA85 = tmp;
tmp = AA56; AA56 = AA86; AA86 = tmp;
tmp = AA57; AA57 = AA87; AA87 = tmp;
tmp = AA58; AA58 = AA88; AA88 = tmp;
tmp = AA59; AA59 = AA89; AA89 = tmp;
/* update permutation vector based on row swap */
i = perm5; perm5 = perm8; perm8 = i;
}
if (pvt == 9) {
tmp = AA50; AA50 = AA90; AA90 = tmp;
tmp = AA51; AA51 = AA91; AA91 = tmp;
tmp = AA52; AA52 = AA92; AA92 = tmp;
tmp = AA53; AA53 = AA93; AA93 = tmp;
tmp = AA54; AA54 = AA94; AA94 = tmp;
tmp = AA55; AA55 = AA95; AA95 = tmp;
tmp = AA56; AA56 = AA96; AA96 = tmp;
tmp = AA57; AA57 = AA97; AA97 = tmp;
tmp = AA58; AA58 = AA98; AA98 = tmp;
tmp = AA59; AA59 = AA99; AA99 = tmp;
/* update permutation vector based on row swap */
i = perm5; perm5 = perm9; perm9 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA55);
icol5 = perm5;
AA50 = mulOp (tmp, AA50);
AA51 = mulOp (tmp, AA51);
AA52 = mulOp (tmp, AA52);
AA53 = mulOp (tmp, AA53);
AA54 = mulOp (tmp, AA54);
AA55 = tmp;
AA56 = mulOp (tmp, AA56);
AA57 = mulOp (tmp, AA57);
AA58 = mulOp (tmp, AA58);
AA59 = mulOp (tmp, AA59);
/* eliminate above and below current row */
tmp = AA05;
AA00 = fmnaOp (tmp, AA50, AA00);
AA01 = fmnaOp (tmp, AA51, AA01);
AA02 = fmnaOp (tmp, AA52, AA02);
AA03 = fmnaOp (tmp, AA53, AA03);
AA04 = fmnaOp (tmp, AA54, AA04);
AA05 = mulOp (negOp(tmp), AA55);
AA06 = fmnaOp (tmp, AA56, AA06);
AA07 = fmnaOp (tmp, AA57, AA07);
AA08 = fmnaOp (tmp, AA58, AA08);
AA09 = fmnaOp (tmp, AA59, AA09);
tmp = AA15;
AA10 = fmnaOp (tmp, AA50, AA10);
AA11 = fmnaOp (tmp, AA51, AA11);
AA12 = fmnaOp (tmp, AA52, AA12);
AA13 = fmnaOp (tmp, AA53, AA13);
AA14 = fmnaOp (tmp, AA54, AA14);
AA15 = mulOp (negOp(tmp), AA55);
AA16 = fmnaOp (tmp, AA56, AA16);
AA17 = fmnaOp (tmp, AA57, AA17);
AA18 = fmnaOp (tmp, AA58, AA18);
AA19 = fmnaOp (tmp, AA59, AA19);
tmp = AA25;
AA20 = fmnaOp (tmp, AA50, AA20);
AA21 = fmnaOp (tmp, AA51, AA21);
AA22 = fmnaOp (tmp, AA52, AA22);
AA23 = fmnaOp (tmp, AA53, AA23);
AA24 = fmnaOp (tmp, AA54, AA24);
AA25 = mulOp (negOp(tmp), AA55);
AA26 = fmnaOp (tmp, AA56, AA26);
AA27 = fmnaOp (tmp, AA57, AA27);
AA28 = fmnaOp (tmp, AA58, AA28);
AA29 = fmnaOp (tmp, AA59, AA29);
tmp = AA35;
AA30 = fmnaOp (tmp, AA50, AA30);
AA31 = fmnaOp (tmp, AA51, AA31);
AA32 = fmnaOp (tmp, AA52, AA32);
AA33 = fmnaOp (tmp, AA53, AA33);
AA34 = fmnaOp (tmp, AA54, AA34);
AA35 = mulOp (negOp(tmp), AA55);
AA36 = fmnaOp (tmp, AA56, AA36);
AA37 = fmnaOp (tmp, AA57, AA37);
AA38 = fmnaOp (tmp, AA58, AA38);
AA39 = fmnaOp (tmp, AA59, AA39);
tmp = AA45;
AA40 = fmnaOp (tmp, AA50, AA40);
AA41 = fmnaOp (tmp, AA51, AA41);
AA42 = fmnaOp (tmp, AA52, AA42);
AA43 = fmnaOp (tmp, AA53, AA43);
AA44 = fmnaOp (tmp, AA54, AA44);
AA45 = mulOp (negOp(tmp), AA55);
AA46 = fmnaOp (tmp, AA56, AA46);
AA47 = fmnaOp (tmp, AA57, AA47);
AA48 = fmnaOp (tmp, AA58, AA48);
AA49 = fmnaOp (tmp, AA59, AA49);
tmp = AA65;
AA60 = fmnaOp (tmp, AA50, AA60);
AA61 = fmnaOp (tmp, AA51, AA61);
AA62 = fmnaOp (tmp, AA52, AA62);
AA63 = fmnaOp (tmp, AA53, AA63);
AA64 = fmnaOp (tmp, AA54, AA64);
AA65 = mulOp (negOp(tmp), AA55);
AA66 = fmnaOp (tmp, AA56, AA66);
AA67 = fmnaOp (tmp, AA57, AA67);
AA68 = fmnaOp (tmp, AA58, AA68);
AA69 = fmnaOp (tmp, AA59, AA69);
tmp = AA75;
AA70 = fmnaOp (tmp, AA50, AA70);
AA71 = fmnaOp (tmp, AA51, AA71);
AA72 = fmnaOp (tmp, AA52, AA72);
AA73 = fmnaOp (tmp, AA53, AA73);
AA74 = fmnaOp (tmp, AA54, AA74);
AA75 = mulOp (negOp(tmp), AA55);
AA76 = fmnaOp (tmp, AA56, AA76);
AA77 = fmnaOp (tmp, AA57, AA77);
AA78 = fmnaOp (tmp, AA58, AA78);
AA79 = fmnaOp (tmp, AA59, AA79);
tmp = AA85;
AA80 = fmnaOp (tmp, AA50, AA80);
AA81 = fmnaOp (tmp, AA51, AA81);
AA82 = fmnaOp (tmp, AA52, AA82);
AA83 = fmnaOp (tmp, AA53, AA83);
AA84 = fmnaOp (tmp, AA54, AA84);
AA85 = mulOp (negOp(tmp), AA55);
AA86 = fmnaOp (tmp, AA56, AA86);
AA87 = fmnaOp (tmp, AA57, AA87);
AA88 = fmnaOp (tmp, AA58, AA88);
AA89 = fmnaOp (tmp, AA59, AA89);
tmp = AA95;
AA90 = fmnaOp (tmp, AA50, AA90);
AA91 = fmnaOp (tmp, AA51, AA91);
AA92 = fmnaOp (tmp, AA52, AA92);
AA93 = fmnaOp (tmp, AA53, AA93);
AA94 = fmnaOp (tmp, AA54, AA94);
AA95 = mulOp (negOp(tmp), AA55);
AA96 = fmnaOp (tmp, AA56, AA96);
AA97 = fmnaOp (tmp, AA57, AA97);
AA98 = fmnaOp (tmp, AA58, AA98);
AA99 = fmnaOp (tmp, AA59, AA99);
/****************** iteration 6 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA66);
pvt = 6;
t = absOp (AA76);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA86);
if (t > p) { p = t; pvt = 8; }
t = absOp (AA96);
if (t > p) { p = t; pvt = 9; }
/* swap pivot row with row 6 */
if (pvt == 7) {
tmp = AA60; AA60 = AA70; AA70 = tmp;
tmp = AA61; AA61 = AA71; AA71 = tmp;
tmp = AA62; AA62 = AA72; AA72 = tmp;
tmp = AA63; AA63 = AA73; AA73 = tmp;
tmp = AA64; AA64 = AA74; AA74 = tmp;
tmp = AA65; AA65 = AA75; AA75 = tmp;
tmp = AA66; AA66 = AA76; AA76 = tmp;
tmp = AA67; AA67 = AA77; AA77 = tmp;
tmp = AA68; AA68 = AA78; AA78 = tmp;
tmp = AA69; AA69 = AA79; AA79 = tmp;
/* update permutation vector based on row swap */
i = perm6; perm6 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA60; AA60 = AA80; AA80 = tmp;
tmp = AA61; AA61 = AA81; AA81 = tmp;
tmp = AA62; AA62 = AA82; AA82 = tmp;
tmp = AA63; AA63 = AA83; AA83 = tmp;
tmp = AA64; AA64 = AA84; AA84 = tmp;
tmp = AA65; AA65 = AA85; AA85 = tmp;
tmp = AA66; AA66 = AA86; AA86 = tmp;
tmp = AA67; AA67 = AA87; AA87 = tmp;
tmp = AA68; AA68 = AA88; AA88 = tmp;
tmp = AA69; AA69 = AA89; AA89 = tmp;
/* update permutation vector based on row swap */
i = perm6; perm6 = perm8; perm8 = i;
}
if (pvt == 9) {
tmp = AA60; AA60 = AA90; AA90 = tmp;
tmp = AA61; AA61 = AA91; AA91 = tmp;
tmp = AA62; AA62 = AA92; AA92 = tmp;
tmp = AA63; AA63 = AA93; AA93 = tmp;
tmp = AA64; AA64 = AA94; AA94 = tmp;
tmp = AA65; AA65 = AA95; AA95 = tmp;
tmp = AA66; AA66 = AA96; AA96 = tmp;
tmp = AA67; AA67 = AA97; AA97 = tmp;
tmp = AA68; AA68 = AA98; AA98 = tmp;
tmp = AA69; AA69 = AA99; AA99 = tmp;
/* update permutation vector based on row swap */
i = perm6; perm6 = perm9; perm9 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA66);
icol6 = perm6;
AA60 = mulOp (tmp, AA60);
AA61 = mulOp (tmp, AA61);
AA62 = mulOp (tmp, AA62);
AA63 = mulOp (tmp, AA63);
AA64 = mulOp (tmp, AA64);
AA65 = mulOp (tmp, AA65);
AA66 = tmp;
AA67 = mulOp (tmp, AA67);
AA68 = mulOp (tmp, AA68);
AA69 = mulOp (tmp, AA69);
/* eliminate above and below current row */
tmp = AA06;
AA00 = fmnaOp (tmp, AA60, AA00);
AA01 = fmnaOp (tmp, AA61, AA01);
AA02 = fmnaOp (tmp, AA62, AA02);
AA03 = fmnaOp (tmp, AA63, AA03);
AA04 = fmnaOp (tmp, AA64, AA04);
AA05 = fmnaOp (tmp, AA65, AA05);
AA06 = mulOp (negOp(tmp), AA66);
AA07 = fmnaOp (tmp, AA67, AA07);
AA08 = fmnaOp (tmp, AA68, AA08);
AA09 = fmnaOp (tmp, AA69, AA09);
tmp = AA16;
AA10 = fmnaOp (tmp, AA60, AA10);
AA11 = fmnaOp (tmp, AA61, AA11);
AA12 = fmnaOp (tmp, AA62, AA12);
AA13 = fmnaOp (tmp, AA63, AA13);
AA14 = fmnaOp (tmp, AA64, AA14);
AA15 = fmnaOp (tmp, AA65, AA15);
AA16 = mulOp (negOp(tmp), AA66);
AA17 = fmnaOp (tmp, AA67, AA17);
AA18 = fmnaOp (tmp, AA68, AA18);
AA19 = fmnaOp (tmp, AA69, AA19);
tmp = AA26;
AA20 = fmnaOp (tmp, AA60, AA20);
AA21 = fmnaOp (tmp, AA61, AA21);
AA22 = fmnaOp (tmp, AA62, AA22);
AA23 = fmnaOp (tmp, AA63, AA23);
AA24 = fmnaOp (tmp, AA64, AA24);
AA25 = fmnaOp (tmp, AA65, AA25);
AA26 = mulOp (negOp(tmp), AA66);
AA27 = fmnaOp (tmp, AA67, AA27);
AA28 = fmnaOp (tmp, AA68, AA28);
AA29 = fmnaOp (tmp, AA69, AA29);
tmp = AA36;
AA30 = fmnaOp (tmp, AA60, AA30);
AA31 = fmnaOp (tmp, AA61, AA31);
AA32 = fmnaOp (tmp, AA62, AA32);
AA33 = fmnaOp (tmp, AA63, AA33);
AA34 = fmnaOp (tmp, AA64, AA34);
AA35 = fmnaOp (tmp, AA65, AA35);
AA36 = mulOp (negOp(tmp), AA66);
AA37 = fmnaOp (tmp, AA67, AA37);
AA38 = fmnaOp (tmp, AA68, AA38);
AA39 = fmnaOp (tmp, AA69, AA39);
tmp = AA46;
AA40 = fmnaOp (tmp, AA60, AA40);
AA41 = fmnaOp (tmp, AA61, AA41);
AA42 = fmnaOp (tmp, AA62, AA42);
AA43 = fmnaOp (tmp, AA63, AA43);
AA44 = fmnaOp (tmp, AA64, AA44);
AA45 = fmnaOp (tmp, AA65, AA45);
AA46 = mulOp (negOp(tmp), AA66);
AA47 = fmnaOp (tmp, AA67, AA47);
AA48 = fmnaOp (tmp, AA68, AA48);
AA49 = fmnaOp (tmp, AA69, AA49);
tmp = AA56;
AA50 = fmnaOp (tmp, AA60, AA50);
AA51 = fmnaOp (tmp, AA61, AA51);
AA52 = fmnaOp (tmp, AA62, AA52);
AA53 = fmnaOp (tmp, AA63, AA53);
AA54 = fmnaOp (tmp, AA64, AA54);
AA55 = fmnaOp (tmp, AA65, AA55);
AA56 = mulOp (negOp(tmp), AA66);
AA57 = fmnaOp (tmp, AA67, AA57);
AA58 = fmnaOp (tmp, AA68, AA58);
AA59 = fmnaOp (tmp, AA69, AA59);
tmp = AA76;
AA70 = fmnaOp (tmp, AA60, AA70);
AA71 = fmnaOp (tmp, AA61, AA71);
AA72 = fmnaOp (tmp, AA62, AA72);
AA73 = fmnaOp (tmp, AA63, AA73);
AA74 = fmnaOp (tmp, AA64, AA74);
AA75 = fmnaOp (tmp, AA65, AA75);
AA76 = mulOp (negOp(tmp), AA66);
AA77 = fmnaOp (tmp, AA67, AA77);
AA78 = fmnaOp (tmp, AA68, AA78);
AA79 = fmnaOp (tmp, AA69, AA79);
tmp = AA86;
AA80 = fmnaOp (tmp, AA60, AA80);
AA81 = fmnaOp (tmp, AA61, AA81);
AA82 = fmnaOp (tmp, AA62, AA82);
AA83 = fmnaOp (tmp, AA63, AA83);
AA84 = fmnaOp (tmp, AA64, AA84);
AA85 = fmnaOp (tmp, AA65, AA85);
AA86 = mulOp (negOp(tmp), AA66);
AA87 = fmnaOp (tmp, AA67, AA87);
AA88 = fmnaOp (tmp, AA68, AA88);
AA89 = fmnaOp (tmp, AA69, AA89);
tmp = AA96;
AA90 = fmnaOp (tmp, AA60, AA90);
AA91 = fmnaOp (tmp, AA61, AA91);
AA92 = fmnaOp (tmp, AA62, AA92);
AA93 = fmnaOp (tmp, AA63, AA93);
AA94 = fmnaOp (tmp, AA64, AA94);
AA95 = fmnaOp (tmp, AA65, AA95);
AA96 = mulOp (negOp(tmp), AA66);
AA97 = fmnaOp (tmp, AA67, AA97);
AA98 = fmnaOp (tmp, AA68, AA98);
AA99 = fmnaOp (tmp, AA69, AA99);
/****************** iteration 7 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA77);
pvt = 7;
t = absOp (AA87);
if (t > p) { p = t; pvt = 8; }
t = absOp (AA97);
if (t > p) { p = t; pvt = 9; }
/* swap pivot row with row 7 */
if (pvt == 8) {
tmp = AA70; AA70 = AA80; AA80 = tmp;
tmp = AA71; AA71 = AA81; AA81 = tmp;
tmp = AA72; AA72 = AA82; AA82 = tmp;
tmp = AA73; AA73 = AA83; AA83 = tmp;
tmp = AA74; AA74 = AA84; AA84 = tmp;
tmp = AA75; AA75 = AA85; AA85 = tmp;
tmp = AA76; AA76 = AA86; AA86 = tmp;
tmp = AA77; AA77 = AA87; AA87 = tmp;
tmp = AA78; AA78 = AA88; AA88 = tmp;
tmp = AA79; AA79 = AA89; AA89 = tmp;
/* update permutation vector based on row swap */
i = perm7; perm7 = perm8; perm8 = i;
}
if (pvt == 9) {
tmp = AA70; AA70 = AA90; AA90 = tmp;
tmp = AA71; AA71 = AA91; AA91 = tmp;
tmp = AA72; AA72 = AA92; AA92 = tmp;
tmp = AA73; AA73 = AA93; AA93 = tmp;
tmp = AA74; AA74 = AA94; AA94 = tmp;
tmp = AA75; AA75 = AA95; AA95 = tmp;
tmp = AA76; AA76 = AA96; AA96 = tmp;
tmp = AA77; AA77 = AA97; AA97 = tmp;
tmp = AA78; AA78 = AA98; AA98 = tmp;
tmp = AA79; AA79 = AA99; AA99 = tmp;
/* update permutation vector based on row swap */
i = perm7; perm7 = perm9; perm9 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA77);
icol7 = perm7;
AA70 = mulOp (tmp, AA70);
AA71 = mulOp (tmp, AA71);
AA72 = mulOp (tmp, AA72);
AA73 = mulOp (tmp, AA73);
AA74 = mulOp (tmp, AA74);
AA75 = mulOp (tmp, AA75);
AA76 = mulOp (tmp, AA76);
AA77 = tmp;
AA78 = mulOp (tmp, AA78);
AA79 = mulOp (tmp, AA79);
/* eliminate above and below current row */
tmp = AA07;
AA00 = fmnaOp (tmp, AA70, AA00);
AA01 = fmnaOp (tmp, AA71, AA01);
AA02 = fmnaOp (tmp, AA72, AA02);
AA03 = fmnaOp (tmp, AA73, AA03);
AA04 = fmnaOp (tmp, AA74, AA04);
AA05 = fmnaOp (tmp, AA75, AA05);
AA06 = fmnaOp (tmp, AA76, AA06);
AA07 = mulOp (negOp(tmp), AA77);
AA08 = fmnaOp (tmp, AA78, AA08);
AA09 = fmnaOp (tmp, AA79, AA09);
tmp = AA17;
AA10 = fmnaOp (tmp, AA70, AA10);
AA11 = fmnaOp (tmp, AA71, AA11);
AA12 = fmnaOp (tmp, AA72, AA12);
AA13 = fmnaOp (tmp, AA73, AA13);
AA14 = fmnaOp (tmp, AA74, AA14);
AA15 = fmnaOp (tmp, AA75, AA15);
AA16 = fmnaOp (tmp, AA76, AA16);
AA17 = mulOp (negOp(tmp), AA77);
AA18 = fmnaOp (tmp, AA78, AA18);
AA19 = fmnaOp (tmp, AA79, AA19);
tmp = AA27;
AA20 = fmnaOp (tmp, AA70, AA20);
AA21 = fmnaOp (tmp, AA71, AA21);
AA22 = fmnaOp (tmp, AA72, AA22);
AA23 = fmnaOp (tmp, AA73, AA23);
AA24 = fmnaOp (tmp, AA74, AA24);
AA25 = fmnaOp (tmp, AA75, AA25);
AA26 = fmnaOp (tmp, AA76, AA26);
AA27 = mulOp (negOp(tmp), AA77);
AA28 = fmnaOp (tmp, AA78, AA28);
AA29 = fmnaOp (tmp, AA79, AA29);
tmp = AA37;
AA30 = fmnaOp (tmp, AA70, AA30);
AA31 = fmnaOp (tmp, AA71, AA31);
AA32 = fmnaOp (tmp, AA72, AA32);
AA33 = fmnaOp (tmp, AA73, AA33);
AA34 = fmnaOp (tmp, AA74, AA34);
AA35 = fmnaOp (tmp, AA75, AA35);
AA36 = fmnaOp (tmp, AA76, AA36);
AA37 = mulOp (negOp(tmp), AA77);
AA38 = fmnaOp (tmp, AA78, AA38);
AA39 = fmnaOp (tmp, AA79, AA39);
tmp = AA47;
AA40 = fmnaOp (tmp, AA70, AA40);
AA41 = fmnaOp (tmp, AA71, AA41);
AA42 = fmnaOp (tmp, AA72, AA42);
AA43 = fmnaOp (tmp, AA73, AA43);
AA44 = fmnaOp (tmp, AA74, AA44);
AA45 = fmnaOp (tmp, AA75, AA45);
AA46 = fmnaOp (tmp, AA76, AA46);
AA47 = mulOp (negOp(tmp), AA77);
AA48 = fmnaOp (tmp, AA78, AA48);
AA49 = fmnaOp (tmp, AA79, AA49);
tmp = AA57;
AA50 = fmnaOp (tmp, AA70, AA50);
AA51 = fmnaOp (tmp, AA71, AA51);
AA52 = fmnaOp (tmp, AA72, AA52);
AA53 = fmnaOp (tmp, AA73, AA53);
AA54 = fmnaOp (tmp, AA74, AA54);
AA55 = fmnaOp (tmp, AA75, AA55);
AA56 = fmnaOp (tmp, AA76, AA56);
AA57 = mulOp (negOp(tmp), AA77);
AA58 = fmnaOp (tmp, AA78, AA58);
AA59 = fmnaOp (tmp, AA79, AA59);
tmp = AA67;
AA60 = fmnaOp (tmp, AA70, AA60);
AA61 = fmnaOp (tmp, AA71, AA61);
AA62 = fmnaOp (tmp, AA72, AA62);
AA63 = fmnaOp (tmp, AA73, AA63);
AA64 = fmnaOp (tmp, AA74, AA64);
AA65 = fmnaOp (tmp, AA75, AA65);
AA66 = fmnaOp (tmp, AA76, AA66);
AA67 = mulOp (negOp(tmp), AA77);
AA68 = fmnaOp (tmp, AA78, AA68);
AA69 = fmnaOp (tmp, AA79, AA69);
tmp = AA87;
AA80 = fmnaOp (tmp, AA70, AA80);
AA81 = fmnaOp (tmp, AA71, AA81);
AA82 = fmnaOp (tmp, AA72, AA82);
AA83 = fmnaOp (tmp, AA73, AA83);
AA84 = fmnaOp (tmp, AA74, AA84);
AA85 = fmnaOp (tmp, AA75, AA85);
AA86 = fmnaOp (tmp, AA76, AA86);
AA87 = mulOp (negOp(tmp), AA77);
AA88 = fmnaOp (tmp, AA78, AA88);
AA89 = fmnaOp (tmp, AA79, AA89);
tmp = AA97;
AA90 = fmnaOp (tmp, AA70, AA90);
AA91 = fmnaOp (tmp, AA71, AA91);
AA92 = fmnaOp (tmp, AA72, AA92);
AA93 = fmnaOp (tmp, AA73, AA93);
AA94 = fmnaOp (tmp, AA74, AA94);
AA95 = fmnaOp (tmp, AA75, AA95);
AA96 = fmnaOp (tmp, AA76, AA96);
AA97 = mulOp (negOp(tmp), AA77);
AA98 = fmnaOp (tmp, AA78, AA98);
AA99 = fmnaOp (tmp, AA79, AA99);
/****************** iteration 8 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA88);
pvt = 8;
t = absOp (AA98);
if (t > p) { p = t; pvt = 9; }
/* swap pivot row with row 8 */
if (pvt == 9) {
tmp = AA80; AA80 = AA90; AA90 = tmp;
tmp = AA81; AA81 = AA91; AA91 = tmp;
tmp = AA82; AA82 = AA92; AA92 = tmp;
tmp = AA83; AA83 = AA93; AA93 = tmp;
tmp = AA84; AA84 = AA94; AA94 = tmp;
tmp = AA85; AA85 = AA95; AA95 = tmp;
tmp = AA86; AA86 = AA96; AA96 = tmp;
tmp = AA87; AA87 = AA97; AA97 = tmp;
tmp = AA88; AA88 = AA98; AA98 = tmp;
tmp = AA89; AA89 = AA99; AA99 = tmp;
/* update permutation vector based on row swap */
i = perm8; perm8 = perm9; perm9 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA88);
icol8 = perm8;
AA80 = mulOp (tmp, AA80);
AA81 = mulOp (tmp, AA81);
AA82 = mulOp (tmp, AA82);
AA83 = mulOp (tmp, AA83);
AA84 = mulOp (tmp, AA84);
AA85 = mulOp (tmp, AA85);
AA86 = mulOp (tmp, AA86);
AA87 = mulOp (tmp, AA87);
AA88 = tmp;
AA89 = mulOp (tmp, AA89);
/* eliminate above and below current row */
tmp = AA08;
AA00 = fmnaOp (tmp, AA80, AA00);
AA01 = fmnaOp (tmp, AA81, AA01);
AA02 = fmnaOp (tmp, AA82, AA02);
AA03 = fmnaOp (tmp, AA83, AA03);
AA04 = fmnaOp (tmp, AA84, AA04);
AA05 = fmnaOp (tmp, AA85, AA05);
AA06 = fmnaOp (tmp, AA86, AA06);
AA07 = fmnaOp (tmp, AA87, AA07);
AA08 = mulOp (negOp(tmp), AA88);
AA09 = fmnaOp (tmp, AA89, AA09);
tmp = AA18;
AA10 = fmnaOp (tmp, AA80, AA10);
AA11 = fmnaOp (tmp, AA81, AA11);
AA12 = fmnaOp (tmp, AA82, AA12);
AA13 = fmnaOp (tmp, AA83, AA13);
AA14 = fmnaOp (tmp, AA84, AA14);
AA15 = fmnaOp (tmp, AA85, AA15);
AA16 = fmnaOp (tmp, AA86, AA16);
AA17 = fmnaOp (tmp, AA87, AA17);
AA18 = mulOp (negOp(tmp), AA88);
AA19 = fmnaOp (tmp, AA89, AA19);
tmp = AA28;
AA20 = fmnaOp (tmp, AA80, AA20);
AA21 = fmnaOp (tmp, AA81, AA21);
AA22 = fmnaOp (tmp, AA82, AA22);
AA23 = fmnaOp (tmp, AA83, AA23);
AA24 = fmnaOp (tmp, AA84, AA24);
AA25 = fmnaOp (tmp, AA85, AA25);
AA26 = fmnaOp (tmp, AA86, AA26);
AA27 = fmnaOp (tmp, AA87, AA27);
AA28 = mulOp (negOp(tmp), AA88);
AA29 = fmnaOp (tmp, AA89, AA29);
tmp = AA38;
AA30 = fmnaOp (tmp, AA80, AA30);
AA31 = fmnaOp (tmp, AA81, AA31);
AA32 = fmnaOp (tmp, AA82, AA32);
AA33 = fmnaOp (tmp, AA83, AA33);
AA34 = fmnaOp (tmp, AA84, AA34);
AA35 = fmnaOp (tmp, AA85, AA35);
AA36 = fmnaOp (tmp, AA86, AA36);
AA37 = fmnaOp (tmp, AA87, AA37);
AA38 = mulOp (negOp(tmp), AA88);
AA39 = fmnaOp (tmp, AA89, AA39);
tmp = AA48;
AA40 = fmnaOp (tmp, AA80, AA40);
AA41 = fmnaOp (tmp, AA81, AA41);
AA42 = fmnaOp (tmp, AA82, AA42);
AA43 = fmnaOp (tmp, AA83, AA43);
AA44 = fmnaOp (tmp, AA84, AA44);
AA45 = fmnaOp (tmp, AA85, AA45);
AA46 = fmnaOp (tmp, AA86, AA46);
AA47 = fmnaOp (tmp, AA87, AA47);
AA48 = mulOp (negOp(tmp), AA88);
AA49 = fmnaOp (tmp, AA89, AA49);
tmp = AA58;
AA50 = fmnaOp (tmp, AA80, AA50);
AA51 = fmnaOp (tmp, AA81, AA51);
AA52 = fmnaOp (tmp, AA82, AA52);
AA53 = fmnaOp (tmp, AA83, AA53);
AA54 = fmnaOp (tmp, AA84, AA54);
AA55 = fmnaOp (tmp, AA85, AA55);
AA56 = fmnaOp (tmp, AA86, AA56);
AA57 = fmnaOp (tmp, AA87, AA57);
AA58 = mulOp (negOp(tmp), AA88);
AA59 = fmnaOp (tmp, AA89, AA59);
tmp = AA68;
AA60 = fmnaOp (tmp, AA80, AA60);
AA61 = fmnaOp (tmp, AA81, AA61);
AA62 = fmnaOp (tmp, AA82, AA62);
AA63 = fmnaOp (tmp, AA83, AA63);
AA64 = fmnaOp (tmp, AA84, AA64);
AA65 = fmnaOp (tmp, AA85, AA65);
AA66 = fmnaOp (tmp, AA86, AA66);
AA67 = fmnaOp (tmp, AA87, AA67);
AA68 = mulOp (negOp(tmp), AA88);
AA69 = fmnaOp (tmp, AA89, AA69);
tmp = AA78;
AA70 = fmnaOp (tmp, AA80, AA70);
AA71 = fmnaOp (tmp, AA81, AA71);
AA72 = fmnaOp (tmp, AA82, AA72);
AA73 = fmnaOp (tmp, AA83, AA73);
AA74 = fmnaOp (tmp, AA84, AA74);
AA75 = fmnaOp (tmp, AA85, AA75);
AA76 = fmnaOp (tmp, AA86, AA76);
AA77 = fmnaOp (tmp, AA87, AA77);
AA78 = mulOp (negOp(tmp), AA88);
AA79 = fmnaOp (tmp, AA89, AA79);
tmp = AA98;
AA90 = fmnaOp (tmp, AA80, AA90);
AA91 = fmnaOp (tmp, AA81, AA91);
AA92 = fmnaOp (tmp, AA82, AA92);
AA93 = fmnaOp (tmp, AA83, AA93);
AA94 = fmnaOp (tmp, AA84, AA94);
AA95 = fmnaOp (tmp, AA85, AA95);
AA96 = fmnaOp (tmp, AA86, AA96);
AA97 = fmnaOp (tmp, AA87, AA97);
AA98 = mulOp (negOp(tmp), AA88);
AA99 = fmnaOp (tmp, AA89, AA99);
/****************** iteration 9 ****************/
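        /* no pivot search in the final iteration: row 9 is the only remaining
           candidate row */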
/* scale current row */
tmp = rcpOp (AA99);
icol9 = perm9;
AA90 = mulOp (tmp, AA90);
AA91 = mulOp (tmp, AA91);
AA92 = mulOp (tmp, AA92);
AA93 = mulOp (tmp, AA93);
AA94 = mulOp (tmp, AA94);
AA95 = mulOp (tmp, AA95);
AA96 = mulOp (tmp, AA96);
AA97 = mulOp (tmp, AA97);
AA98 = mulOp (tmp, AA98);
AA99 = tmp;
/* eliminate above and below current row */
tmp = AA09;
AA00 = fmnaOp (tmp, AA90, AA00);
AA01 = fmnaOp (tmp, AA91, AA01);
AA02 = fmnaOp (tmp, AA92, AA02);
AA03 = fmnaOp (tmp, AA93, AA03);
AA04 = fmnaOp (tmp, AA94, AA04);
AA05 = fmnaOp (tmp, AA95, AA05);
AA06 = fmnaOp (tmp, AA96, AA06);
AA07 = fmnaOp (tmp, AA97, AA07);
AA08 = fmnaOp (tmp, AA98, AA08);
AA09 = mulOp (negOp(tmp), AA99);
tmp = AA19;
AA10 = fmnaOp (tmp, AA90, AA10);
AA11 = fmnaOp (tmp, AA91, AA11);
AA12 = fmnaOp (tmp, AA92, AA12);
AA13 = fmnaOp (tmp, AA93, AA13);
AA14 = fmnaOp (tmp, AA94, AA14);
AA15 = fmnaOp (tmp, AA95, AA15);
AA16 = fmnaOp (tmp, AA96, AA16);
AA17 = fmnaOp (tmp, AA97, AA17);
AA18 = fmnaOp (tmp, AA98, AA18);
AA19 = mulOp (negOp(tmp), AA99);
tmp = AA29;
AA20 = fmnaOp (tmp, AA90, AA20);
AA21 = fmnaOp (tmp, AA91, AA21);
AA22 = fmnaOp (tmp, AA92, AA22);
AA23 = fmnaOp (tmp, AA93, AA23);
AA24 = fmnaOp (tmp, AA94, AA24);
AA25 = fmnaOp (tmp, AA95, AA25);
AA26 = fmnaOp (tmp, AA96, AA26);
AA27 = fmnaOp (tmp, AA97, AA27);
AA28 = fmnaOp (tmp, AA98, AA28);
AA29 = mulOp (negOp(tmp), AA99);
tmp = AA39;
AA30 = fmnaOp (tmp, AA90, AA30);
AA31 = fmnaOp (tmp, AA91, AA31);
AA32 = fmnaOp (tmp, AA92, AA32);
AA33 = fmnaOp (tmp, AA93, AA33);
AA34 = fmnaOp (tmp, AA94, AA34);
AA35 = fmnaOp (tmp, AA95, AA35);
AA36 = fmnaOp (tmp, AA96, AA36);
AA37 = fmnaOp (tmp, AA97, AA37);
AA38 = fmnaOp (tmp, AA98, AA38);
AA39 = mulOp (negOp(tmp), AA99);
tmp = AA49;
AA40 = fmnaOp (tmp, AA90, AA40);
AA41 = fmnaOp (tmp, AA91, AA41);
AA42 = fmnaOp (tmp, AA92, AA42);
AA43 = fmnaOp (tmp, AA93, AA43);
AA44 = fmnaOp (tmp, AA94, AA44);
AA45 = fmnaOp (tmp, AA95, AA45);
AA46 = fmnaOp (tmp, AA96, AA46);
AA47 = fmnaOp (tmp, AA97, AA47);
AA48 = fmnaOp (tmp, AA98, AA48);
AA49 = mulOp (negOp(tmp), AA99);
tmp = AA59;
AA50 = fmnaOp (tmp, AA90, AA50);
AA51 = fmnaOp (tmp, AA91, AA51);
AA52 = fmnaOp (tmp, AA92, AA52);
AA53 = fmnaOp (tmp, AA93, AA53);
AA54 = fmnaOp (tmp, AA94, AA54);
AA55 = fmnaOp (tmp, AA95, AA55);
AA56 = fmnaOp (tmp, AA96, AA56);
AA57 = fmnaOp (tmp, AA97, AA57);
AA58 = fmnaOp (tmp, AA98, AA58);
AA59 = mulOp (negOp(tmp), AA99);
tmp = AA69;
AA60 = fmnaOp (tmp, AA90, AA60);
AA61 = fmnaOp (tmp, AA91, AA61);
AA62 = fmnaOp (tmp, AA92, AA62);
AA63 = fmnaOp (tmp, AA93, AA63);
AA64 = fmnaOp (tmp, AA94, AA64);
AA65 = fmnaOp (tmp, AA95, AA65);
AA66 = fmnaOp (tmp, AA96, AA66);
AA67 = fmnaOp (tmp, AA97, AA67);
AA68 = fmnaOp (tmp, AA98, AA68);
AA69 = mulOp (negOp(tmp), AA99);
tmp = AA79;
AA70 = fmnaOp (tmp, AA90, AA70);
AA71 = fmnaOp (tmp, AA91, AA71);
AA72 = fmnaOp (tmp, AA92, AA72);
AA73 = fmnaOp (tmp, AA93, AA73);
AA74 = fmnaOp (tmp, AA94, AA74);
AA75 = fmnaOp (tmp, AA95, AA75);
AA76 = fmnaOp (tmp, AA96, AA76);
AA77 = fmnaOp (tmp, AA97, AA77);
AA78 = fmnaOp (tmp, AA98, AA78);
AA79 = mulOp (negOp(tmp), AA99);
tmp = AA89;
AA80 = fmnaOp (tmp, AA90, AA80);
AA81 = fmnaOp (tmp, AA91, AA81);
AA82 = fmnaOp (tmp, AA92, AA82);
AA83 = fmnaOp (tmp, AA93, AA83);
AA84 = fmnaOp (tmp, AA94, AA84);
AA85 = fmnaOp (tmp, AA95, AA85);
AA86 = fmnaOp (tmp, AA96, AA86);
AA87 = fmnaOp (tmp, AA97, AA87);
AA88 = fmnaOp (tmp, AA98, AA88);
AA89 = mulOp (negOp(tmp), AA99);
/* sort columns into the correct order */
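        /* icolK holds the permutation entry captured when row K was the pivot
           row; writing reduced column K into column icolK of Ainv undoes the
           column permutation that row pivoting introduced into the result. */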
Ainv(0,icol0) = AA00;
Ainv(1,icol0) = AA10;
Ainv(2,icol0) = AA20;
Ainv(3,icol0) = AA30;
Ainv(4,icol0) = AA40;
Ainv(5,icol0) = AA50;
Ainv(6,icol0) = AA60;
Ainv(7,icol0) = AA70;
Ainv(8,icol0) = AA80;
Ainv(9,icol0) = AA90;
Ainv(0,icol1) = AA01;
Ainv(1,icol1) = AA11;
Ainv(2,icol1) = AA21;
Ainv(3,icol1) = AA31;
Ainv(4,icol1) = AA41;
Ainv(5,icol1) = AA51;
Ainv(6,icol1) = AA61;
Ainv(7,icol1) = AA71;
Ainv(8,icol1) = AA81;
Ainv(9,icol1) = AA91;
Ainv(0,icol2) = AA02;
Ainv(1,icol2) = AA12;
Ainv(2,icol2) = AA22;
Ainv(3,icol2) = AA32;
Ainv(4,icol2) = AA42;
Ainv(5,icol2) = AA52;
Ainv(6,icol2) = AA62;
Ainv(7,icol2) = AA72;
Ainv(8,icol2) = AA82;
Ainv(9,icol2) = AA92;
Ainv(0,icol3) = AA03;
Ainv(1,icol3) = AA13;
Ainv(2,icol3) = AA23;
Ainv(3,icol3) = AA33;
Ainv(4,icol3) = AA43;
Ainv(5,icol3) = AA53;
Ainv(6,icol3) = AA63;
Ainv(7,icol3) = AA73;
Ainv(8,icol3) = AA83;
Ainv(9,icol3) = AA93;
Ainv(0,icol4) = AA04;
Ainv(1,icol4) = AA14;
Ainv(2,icol4) = AA24;
Ainv(3,icol4) = AA34;
Ainv(4,icol4) = AA44;
Ainv(5,icol4) = AA54;
Ainv(6,icol4) = AA64;
Ainv(7,icol4) = AA74;
Ainv(8,icol4) = AA84;
Ainv(9,icol4) = AA94;
Ainv(0,icol5) = AA05;
Ainv(1,icol5) = AA15;
Ainv(2,icol5) = AA25;
Ainv(3,icol5) = AA35;
Ainv(4,icol5) = AA45;
Ainv(5,icol5) = AA55;
Ainv(6,icol5) = AA65;
Ainv(7,icol5) = AA75;
Ainv(8,icol5) = AA85;
Ainv(9,icol5) = AA95;
Ainv(0,icol6) = AA06;
Ainv(1,icol6) = AA16;
Ainv(2,icol6) = AA26;
Ainv(3,icol6) = AA36;
Ainv(4,icol6) = AA46;
Ainv(5,icol6) = AA56;
Ainv(6,icol6) = AA66;
Ainv(7,icol6) = AA76;
Ainv(8,icol6) = AA86;
Ainv(9,icol6) = AA96;
Ainv(0,icol7) = AA07;
Ainv(1,icol7) = AA17;
Ainv(2,icol7) = AA27;
Ainv(3,icol7) = AA37;
Ainv(4,icol7) = AA47;
Ainv(5,icol7) = AA57;
Ainv(6,icol7) = AA67;
Ainv(7,icol7) = AA77;
Ainv(8,icol7) = AA87;
Ainv(9,icol7) = AA97;
Ainv(0,icol8) = AA08;
Ainv(1,icol8) = AA18;
Ainv(2,icol8) = AA28;
Ainv(3,icol8) = AA38;
Ainv(4,icol8) = AA48;
Ainv(5,icol8) = AA58;
Ainv(6,icol8) = AA68;
Ainv(7,icol8) = AA78;
Ainv(8,icol8) = AA88;
Ainv(9,icol8) = AA98;
Ainv(0,icol9) = AA09;
Ainv(1,icol9) = AA19;
Ainv(2,icol9) = AA29;
Ainv(3,icol9) = AA39;
Ainv(4,icol9) = AA49;
Ainv(5,icol9) = AA59;
Ainv(6,icol9) = AA69;
Ainv(7,icol9) = AA79;
Ainv(8,icol9) = AA89;
Ainv(9,icol9) = AA99;
}
} /* if (!isDoubleComplex<T>()) */
}
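/* Generic shared-memory variant: each thread block inverts one N x N matrix
   of the batch. The matrix is staged in dynamic shared memory with `pad`
   extra rows per column to mitigate bank conflicts; threadIdx.y selects the
   column a thread works on (the launch appears to assume blockDim.y == N),
   threadIdx.x strides across rows, and `pivot_thrds` threads cooperate in
   the partial-pivot search of each elimination step. */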
extern __shared__ double2 shmem[];
template<typename T, int pad, int pivot_thrds, int arch>
__global__ void
__launch_bounds__ (config<T,arch>::gje3MaxThrds, config<T,arch>::gje3MinBlks)
matinv_gje3 (const T *A, T *Ainv, int N, int batch)
{
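    /* dynamic shared memory layout: an (N+pad) x N tile As, followed by
       pivot_thrds partial pivot magnitudes (Val) and row indices (Loc),
       the output column permutation icol[N], and the row permutation perm[N] */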
T *As = (T*)shmem;
typename config<T,arch>::absValType *Val =
(typename config<T,arch>::absValType *)(As + (N+pad) * N);
int *Loc = (int*)(Val + pivot_thrds);
int *icol = (int*)(Loc + pivot_thrds);
int *perm = (int*)(icol + N);
T diagRcp;
const int ofs = pad;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blkNum = blockIdx.y * gridDim.x + blockIdx.x;
if (blkNum >= batch) return;
A += blkNum * N * N;
Ainv += blkNum * N * N;
/* Load matrix into shared memory */
for (int i = tx; i < N; i += blockDim.x) {
As(i,ty) = A[ty * N + i];
}
/* initialize row permutation vector */
if (tx == 0) perm[ty] = ty;
int j = 0;
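    /* Gauss-Jordan main loop, one pass per column j: cooperative pivot search
       in column j, row swap, scaling of the pivot row, elimination of column j
       from all other rows, then rewrite of column j and recording of the
       column permutation used in the final write-back. */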
do {
/* Look for pivot */
__syncthreads();
if ((tx == 0) && (ty < pivot_thrds)) {
typename config<T,arch>::absValType val0 = absOp (As(j,j));
int loc0 = j;
int i = j + 1 + ty;
T *dp = &As(i,j);
const int incr = &As(pivot_thrds,0)-&As(0,0);
while (i < N) {
typename config<T,arch>::absValType vali = absOp (*dp);
if (val0 < vali) {
val0 = vali;
loc0 = i;
}
dp += incr;
i += pivot_thrds;
}
Loc[ty] = loc0;
if (pivot_thrds > 1) Val[ty] = val0;
}
/* Swap current row with pivot */
__syncthreads();
if (tx == 0) {
T tmp;
int it;
int Pl = Loc[0];
if (pivot_thrds > 1) {
typename config<T,arch>::absValType val = Val[0];
int i = 1;
for (; i < (pivot_thrds-1); i++) {
if (Val[i] > val) {
Pl = Loc[i];
val = Val[i];
}
}
if (Val[i] > val) {
Pl = Loc[i];
}
}
tmp = As(Pl,ty);
As(Pl,ty) = As(j,ty);
As(j,ty) = tmp;
/* update permutation vector based on row swap */
if (ty == j) {
it = perm[Pl];
perm[Pl] = perm[j];
perm[j] = it;
}
}
/* scale current row, except current column */
__syncthreads();
diagRcp = rcpOp (As(j,j));
if ((tx == 0) && !(ty == j)) {
As(j,ty) = mulOp (As(j,ty), diagRcp);
}
/* update above and below current row, except current column */
__syncthreads();
for (int i = tx; i < N; i += blockDim.x) {
if ((i != j) && !(ty == j)) {
As(i,ty) = fmnaOp (As(i,j), As(j,ty), As(i,ty));
}
}
/* update current column, and column permutation vector */
__syncthreads();
if (tx == 0) {
As(ty,j) = (ty == j) ? diagRcp : negOp (mulOp (As(ty,j), diagRcp));
if (ty == j) {
icol[j] = perm[j];
}
}
j++;
} while (j < N);
__syncthreads();
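    /* write the inverse back to global memory; icol[ty] maps shared-memory
       column ty to its proper column of Ainv, undoing the row pivoting */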
for (int i = tx; i < N; i += blockDim.x) {
Ainv[icol[ty] * N + i] = As(i,ty);
}
}
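/* Host-side dispatch: the tables below hold per-dimension tuning parameters
   (shared-memory padding, thread-block x-dimension, number of pivot-search
   threads) plus a table of pre-instantiated kernels indexed by the matrix
   dimension; the first two entries of the kernel table are left null. */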
template <typename T, int arch>
int matinv_gje3 (const T *A_d, T *Ainv_d, int n, int batch)
{
typedef void (* func)(const T *A_d, T *Ainv_d, int n, int batch);
static int padding[110] = {
config<T,arch>::gje3Pad_00, config<T,arch>::gje3Pad_01,
config<T,arch>::gje3Pad_02, config<T,arch>::gje3Pad_03,
config<T,arch>::gje3Pad_04, config<T,arch>::gje3Pad_05,
config<T,arch>::gje3Pad_06, config<T,arch>::gje3Pad_07,
config<T,arch>::gje3Pad_08, config<T,arch>::gje3Pad_09,
config<T,arch>::gje3Pad_10, config<T,arch>::gje3Pad_11,
config<T,arch>::gje3Pad_12, config<T,arch>::gje3Pad_13,
config<T,arch>::gje3Pad_14, config<T,arch>::gje3Pad_15,
config<T,arch>::gje3Pad_16, config<T,arch>::gje3Pad_17,
config<T,arch>::gje3Pad_18, config<T,arch>::gje3Pad_19,
config<T,arch>::gje3Pad_20, config<T,arch>::gje3Pad_21,
config<T,arch>::gje3Pad_22, config<T,arch>::gje3Pad_23,
config<T,arch>::gje3Pad_24, config<T,arch>::gje3Pad_25,
config<T,arch>::gje3Pad_26, config<T,arch>::gje3Pad_27,
config<T,arch>::gje3Pad_28, config<T,arch>::gje3Pad_29,
config<T,arch>::gje3Pad_30, config<T,arch>::gje3Pad_31,
config<T,arch>::gje3Pad_32, config<T,arch>::gje3Pad_33,
config<T,arch>::gje3Pad_34, config<T,arch>::gje3Pad_35,
config<T,arch>::gje3Pad_36, config<T,arch>::gje3Pad_37,
config<T,arch>::gje3Pad_38, config<T,arch>::gje3Pad_39,
config<T,arch>::gje3Pad_40, config<T,arch>::gje3Pad_41,
config<T,arch>::gje3Pad_42, config<T,arch>::gje3Pad_43,
config<T,arch>::gje3Pad_44, config<T,arch>::gje3Pad_45,
config<T,arch>::gje3Pad_46, config<T,arch>::gje3Pad_47,
config<T,arch>::gje3Pad_48, config<T,arch>::gje3Pad_49,
config<T,arch>::gje3Pad_50, config<T,arch>::gje3Pad_51,
config<T,arch>::gje3Pad_52, config<T,arch>::gje3Pad_53,
config<T,arch>::gje3Pad_54, config<T,arch>::gje3Pad_55,
config<T,arch>::gje3Pad_56, config<T,arch>::gje3Pad_57,
config<T,arch>::gje3Pad_58, config<T,arch>::gje3Pad_59,
config<T,arch>::gje3Pad_60, config<T,arch>::gje3Pad_61,
config<T,arch>::gje3Pad_62, config<T,arch>::gje3Pad_63,
config<T,arch>::gje3Pad_64, config<T,arch>::gje3Pad_65,
config<T,arch>::gje3Pad_66, config<T,arch>::gje3Pad_67,
config<T,arch>::gje3Pad_68, config<T,arch>::gje3Pad_69,
config<T,arch>::gje3Pad_70, config<T,arch>::gje3Pad_71,
config<T,arch>::gje3Pad_72, config<T,arch>::gje3Pad_73,
config<T,arch>::gje3Pad_74, config<T,arch>::gje3Pad_75,
config<T,arch>::gje3Pad_76, config<T,arch>::gje3Pad_77,
config<T,arch>::gje3Pad_78, config<T,arch>::gje3Pad_79,
config<T,arch>::gje3Pad_80, config<T,arch>::gje3Pad_81,
config<T,arch>::gje3Pad_82, config<T,arch>::gje3Pad_83,
config<T,arch>::gje3Pad_84, config<T,arch>::gje3Pad_85,
config<T,arch>::gje3Pad_86, config<T,arch>::gje3Pad_87,
config<T,arch>::gje3Pad_88, config<T,arch>::gje3Pad_89,
config<T,arch>::gje3Pad_90, config<T,arch>::gje3Pad_91,
config<T,arch>::gje3Pad_92, config<T,arch>::gje3Pad_93,
config<T,arch>::gje3Pad_94, config<T,arch>::gje3Pad_95,
config<T,arch>::gje3Pad_96, config<T,arch>::gje3Pad_97,
config<T,arch>::gje3Pad_98, config<T,arch>::gje3Pad_99,
config<T,arch>::gje3Pad_100,config<T,arch>::gje3Pad_101,
config<T,arch>::gje3Pad_102,config<T,arch>::gje3Pad_103,
config<T,arch>::gje3Pad_104,config<T,arch>::gje3Pad_105,
config<T,arch>::gje3Pad_106,config<T,arch>::gje3Pad_107,
config<T,arch>::gje3Pad_108,config<T,arch>::gje3Pad_109
};
static int dimX[110] = {
config<T,arch>::gje3DimX_00, config<T,arch>::gje3DimX_01,
config<T,arch>::gje3DimX_02, config<T,arch>::gje3DimX_03,
config<T,arch>::gje3DimX_04, config<T,arch>::gje3DimX_05,
config<T,arch>::gje3DimX_06, config<T,arch>::gje3DimX_07,
config<T,arch>::gje3DimX_08, config<T,arch>::gje3DimX_09,
config<T,arch>::gje3DimX_10, config<T,arch>::gje3DimX_11,
config<T,arch>::gje3DimX_12, config<T,arch>::gje3DimX_13,
config<T,arch>::gje3DimX_14, config<T,arch>::gje3DimX_15,
config<T,arch>::gje3DimX_16, config<T,arch>::gje3DimX_17,
config<T,arch>::gje3DimX_18, config<T,arch>::gje3DimX_19,
config<T,arch>::gje3DimX_20, config<T,arch>::gje3DimX_21,
config<T,arch>::gje3DimX_22, config<T,arch>::gje3DimX_23,
config<T,arch>::gje3DimX_24, config<T,arch>::gje3DimX_25,
config<T,arch>::gje3DimX_26, config<T,arch>::gje3DimX_27,
config<T,arch>::gje3DimX_28, config<T,arch>::gje3DimX_29,
config<T,arch>::gje3DimX_30, config<T,arch>::gje3DimX_31,
config<T,arch>::gje3DimX_32, config<T,arch>::gje3DimX_33,
config<T,arch>::gje3DimX_34, config<T,arch>::gje3DimX_35,
config<T,arch>::gje3DimX_36, config<T,arch>::gje3DimX_37,
config<T,arch>::gje3DimX_38, config<T,arch>::gje3DimX_39,
config<T,arch>::gje3DimX_40, config<T,arch>::gje3DimX_41,
config<T,arch>::gje3DimX_42, config<T,arch>::gje3DimX_43,
config<T,arch>::gje3DimX_44, config<T,arch>::gje3DimX_45,
config<T,arch>::gje3DimX_46, config<T,arch>::gje3DimX_47,
config<T,arch>::gje3DimX_48, config<T,arch>::gje3DimX_49,
config<T,arch>::gje3DimX_50, config<T,arch>::gje3DimX_51,
config<T,arch>::gje3DimX_52, config<T,arch>::gje3DimX_53,
config<T,arch>::gje3DimX_54, config<T,arch>::gje3DimX_55,
config<T,arch>::gje3DimX_56, config<T,arch>::gje3DimX_57,
config<T,arch>::gje3DimX_58, config<T,arch>::gje3DimX_59,
config<T,arch>::gje3DimX_60, config<T,arch>::gje3DimX_61,
config<T,arch>::gje3DimX_62, config<T,arch>::gje3DimX_63,
config<T,arch>::gje3DimX_64, config<T,arch>::gje3DimX_65,
config<T,arch>::gje3DimX_66, config<T,arch>::gje3DimX_67,
config<T,arch>::gje3DimX_68, config<T,arch>::gje3DimX_69,
config<T,arch>::gje3DimX_70, config<T,arch>::gje3DimX_71,
config<T,arch>::gje3DimX_72, config<T,arch>::gje3DimX_73,
config<T,arch>::gje3DimX_74, config<T,arch>::gje3DimX_75,
config<T,arch>::gje3DimX_76, config<T,arch>::gje3DimX_77,
config<T,arch>::gje3DimX_78, config<T,arch>::gje3DimX_79,
config<T,arch>::gje3DimX_80, config<T,arch>::gje3DimX_81,
config<T,arch>::gje3DimX_82, config<T,arch>::gje3DimX_83,
config<T,arch>::gje3DimX_84, config<T,arch>::gje3DimX_85,
config<T,arch>::gje3DimX_86, config<T,arch>::gje3DimX_87,
config<T,arch>::gje3DimX_88, config<T,arch>::gje3DimX_89,
config<T,arch>::gje3DimX_90, config<T,arch>::gje3DimX_91,
config<T,arch>::gje3DimX_92, config<T,arch>::gje3DimX_93,
config<T,arch>::gje3DimX_94, config<T,arch>::gje3DimX_95,
config<T,arch>::gje3DimX_96, config<T,arch>::gje3DimX_97,
config<T,arch>::gje3DimX_98, config<T,arch>::gje3DimX_99,
config<T,arch>::gje3DimX_100,config<T,arch>::gje3DimX_101,
config<T,arch>::gje3DimX_102,config<T,arch>::gje3DimX_103,
config<T,arch>::gje3DimX_104,config<T,arch>::gje3DimX_105,
config<T,arch>::gje3DimX_106,config<T,arch>::gje3DimX_107,
config<T,arch>::gje3DimX_108,config<T,arch>::gje3DimX_109
};
static int srchThrd[110] = {
config<T,arch>::gje3SrchThrd_00, config<T,arch>::gje3SrchThrd_01,
config<T,arch>::gje3SrchThrd_02, config<T,arch>::gje3SrchThrd_03,
config<T,arch>::gje3SrchThrd_04, config<T,arch>::gje3SrchThrd_05,
config<T,arch>::gje3SrchThrd_06, config<T,arch>::gje3SrchThrd_07,
config<T,arch>::gje3SrchThrd_08, config<T,arch>::gje3SrchThrd_09,
config<T,arch>::gje3SrchThrd_10, config<T,arch>::gje3SrchThrd_11,
config<T,arch>::gje3SrchThrd_12, config<T,arch>::gje3SrchThrd_13,
config<T,arch>::gje3SrchThrd_14, config<T,arch>::gje3SrchThrd_15,
config<T,arch>::gje3SrchThrd_16, config<T,arch>::gje3SrchThrd_17,
config<T,arch>::gje3SrchThrd_18, config<T,arch>::gje3SrchThrd_19,
config<T,arch>::gje3SrchThrd_20, config<T,arch>::gje3SrchThrd_21,
config<T,arch>::gje3SrchThrd_22, config<T,arch>::gje3SrchThrd_23,
config<T,arch>::gje3SrchThrd_24, config<T,arch>::gje3SrchThrd_25,
config<T,arch>::gje3SrchThrd_26, config<T,arch>::gje3SrchThrd_27,
config<T,arch>::gje3SrchThrd_28, config<T,arch>::gje3SrchThrd_29,
config<T,arch>::gje3SrchThrd_30, config<T,arch>::gje3SrchThrd_31,
config<T,arch>::gje3SrchThrd_32, config<T,arch>::gje3SrchThrd_33,
config<T,arch>::gje3SrchThrd_34, config<T,arch>::gje3SrchThrd_35,
config<T,arch>::gje3SrchThrd_36, config<T,arch>::gje3SrchThrd_37,
config<T,arch>::gje3SrchThrd_38, config<T,arch>::gje3SrchThrd_39,
config<T,arch>::gje3SrchThrd_40, config<T,arch>::gje3SrchThrd_41,
config<T,arch>::gje3SrchThrd_42, config<T,arch>::gje3SrchThrd_43,
config<T,arch>::gje3SrchThrd_44, config<T,arch>::gje3SrchThrd_45,
config<T,arch>::gje3SrchThrd_46, config<T,arch>::gje3SrchThrd_47,
config<T,arch>::gje3SrchThrd_48, config<T,arch>::gje3SrchThrd_49,
config<T,arch>::gje3SrchThrd_50, config<T,arch>::gje3SrchThrd_51,
config<T,arch>::gje3SrchThrd_52, config<T,arch>::gje3SrchThrd_53,
config<T,arch>::gje3SrchThrd_54, config<T,arch>::gje3SrchThrd_55,
config<T,arch>::gje3SrchThrd_56, config<T,arch>::gje3SrchThrd_57,
config<T,arch>::gje3SrchThrd_58, config<T,arch>::gje3SrchThrd_59,
config<T,arch>::gje3SrchThrd_60, config<T,arch>::gje3SrchThrd_61,
config<T,arch>::gje3SrchThrd_62, config<T,arch>::gje3SrchThrd_63,
config<T,arch>::gje3SrchThrd_64, config<T,arch>::gje3SrchThrd_65,
config<T,arch>::gje3SrchThrd_66, config<T,arch>::gje3SrchThrd_67,
config<T,arch>::gje3SrchThrd_68, config<T,arch>::gje3SrchThrd_69,
config<T,arch>::gje3SrchThrd_70, config<T,arch>::gje3SrchThrd_71,
config<T,arch>::gje3SrchThrd_72, config<T,arch>::gje3SrchThrd_73,
config<T,arch>::gje3SrchThrd_74, config<T,arch>::gje3SrchThrd_75,
config<T,arch>::gje3SrchThrd_76, config<T,arch>::gje3SrchThrd_77,
config<T,arch>::gje3SrchThrd_78, config<T,arch>::gje3SrchThrd_79,
config<T,arch>::gje3SrchThrd_80, config<T,arch>::gje3SrchThrd_81,
config<T,arch>::gje3SrchThrd_82, config<T,arch>::gje3SrchThrd_83,
config<T,arch>::gje3SrchThrd_84, config<T,arch>::gje3SrchThrd_85,
config<T,arch>::gje3SrchThrd_86, config<T,arch>::gje3SrchThrd_87,
config<T,arch>::gje3SrchThrd_88, config<T,arch>::gje3SrchThrd_89,
config<T,arch>::gje3SrchThrd_90, config<T,arch>::gje3SrchThrd_91,
config<T,arch>::gje3SrchThrd_92, config<T,arch>::gje3SrchThrd_93,
config<T,arch>::gje3SrchThrd_94, config<T,arch>::gje3SrchThrd_95,
config<T,arch>::gje3SrchThrd_96, config<T,arch>::gje3SrchThrd_97,
config<T,arch>::gje3SrchThrd_98, config<T,arch>::gje3SrchThrd_99,
config<T,arch>::gje3SrchThrd_100,config<T,arch>::gje3SrchThrd_101,
config<T,arch>::gje3SrchThrd_102,config<T,arch>::gje3SrchThrd_103,
config<T,arch>::gje3SrchThrd_104,config<T,arch>::gje3SrchThrd_105,
config<T,arch>::gje3SrchThrd_106,config<T,arch>::gje3SrchThrd_107,
config<T,arch>::gje3SrchThrd_108,config<T,arch>::gje3SrchThrd_109
};
func pf[110] = {
0,
0,
matinv_gje3<T, config<T,arch>::gje3Pad_02, config<T,arch>::gje3SrchThrd_02, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_03, config<T,arch>::gje3SrchThrd_03, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_04, config<T,arch>::gje3SrchThrd_04, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_05, config<T,arch>::gje3SrchThrd_05, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_06, config<T,arch>::gje3SrchThrd_06, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_07, config<T,arch>::gje3SrchThrd_07, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_08, config<T,arch>::gje3SrchThrd_08, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_09, config<T,arch>::gje3SrchThrd_09, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_10, config<T,arch>::gje3SrchThrd_10, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_11, config<T,arch>::gje3SrchThrd_11, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_12, config<T,arch>::gje3SrchThrd_12, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_13, config<T,arch>::gje3SrchThrd_13, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_14, config<T,arch>::gje3SrchThrd_14, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_15, config<T,arch>::gje3SrchThrd_15, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_16, config<T,arch>::gje3SrchThrd_16, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_17, config<T,arch>::gje3SrchThrd_17, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_18, config<T,arch>::gje3SrchThrd_18, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_19, config<T,arch>::gje3SrchThrd_19, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_20, config<T,arch>::gje3SrchThrd_20, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_21, config<T,arch>::gje3SrchThrd_21, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_22, config<T,arch>::gje3SrchThrd_22, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_23, config<T,arch>::gje3SrchThrd_23, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_24, config<T,arch>::gje3SrchThrd_24, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_25, config<T,arch>::gje3SrchThrd_25, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_26, config<T,arch>::gje3SrchThrd_26, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_27, config<T,arch>::gje3SrchThrd_27, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_28, config<T,arch>::gje3SrchThrd_28, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_29, config<T,arch>::gje3SrchThrd_29, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_30, config<T,arch>::gje3SrchThrd_30, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_31, config<T,arch>::gje3SrchThrd_31, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_32, config<T,arch>::gje3SrchThrd_32, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_33, config<T,arch>::gje3SrchThrd_33, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_34, config<T,arch>::gje3SrchThrd_34, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_35, config<T,arch>::gje3SrchThrd_35, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_36, config<T,arch>::gje3SrchThrd_36, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_37, config<T,arch>::gje3SrchThrd_37, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_38, config<T,arch>::gje3SrchThrd_38, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_39, config<T,arch>::gje3SrchThrd_39, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_40, config<T,arch>::gje3SrchThrd_40, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_41, config<T,arch>::gje3SrchThrd_41, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_42, config<T,arch>::gje3SrchThrd_42, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_43, config<T,arch>::gje3SrchThrd_43, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_44, config<T,arch>::gje3SrchThrd_44, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_45, config<T,arch>::gje3SrchThrd_45, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_46, config<T,arch>::gje3SrchThrd_46, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_47, config<T,arch>::gje3SrchThrd_47, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_48, config<T,arch>::gje3SrchThrd_48, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_49, config<T,arch>::gje3SrchThrd_49, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_50, config<T,arch>::gje3SrchThrd_50, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_51, config<T,arch>::gje3SrchThrd_51, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_52, config<T,arch>::gje3SrchThrd_52, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_53, config<T,arch>::gje3SrchThrd_53, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_54, config<T,arch>::gje3SrchThrd_54, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_55, config<T,arch>::gje3SrchThrd_55, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_56, config<T,arch>::gje3SrchThrd_56, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_57, config<T,arch>::gje3SrchThrd_57, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_58, config<T,arch>::gje3SrchThrd_58, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_59, config<T,arch>::gje3SrchThrd_59, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_60, config<T,arch>::gje3SrchThrd_60, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_61, config<T,arch>::gje3SrchThrd_61, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_62, config<T,arch>::gje3SrchThrd_62, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_63, config<T,arch>::gje3SrchThrd_63, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_64, config<T,arch>::gje3SrchThrd_64, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_65, config<T,arch>::gje3SrchThrd_65, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_66, config<T,arch>::gje3SrchThrd_66, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_67, config<T,arch>::gje3SrchThrd_67, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_68, config<T,arch>::gje3SrchThrd_68, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_69, config<T,arch>::gje3SrchThrd_69, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_70, config<T,arch>::gje3SrchThrd_70, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_71, config<T,arch>::gje3SrchThrd_71, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_72, config<T,arch>::gje3SrchThrd_72, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_73, config<T,arch>::gje3SrchThrd_73, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_74, config<T,arch>::gje3SrchThrd_74, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_75, config<T,arch>::gje3SrchThrd_75, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_76, config<T,arch>::gje3SrchThrd_76, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_77, config<T,arch>::gje3SrchThrd_77, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_78, config<T,arch>::gje3SrchThrd_78, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_79, config<T,arch>::gje3SrchThrd_79, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_80, config<T,arch>::gje3SrchThrd_80, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_81, config<T,arch>::gje3SrchThrd_81, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_82, config<T,arch>::gje3SrchThrd_82, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_83, config<T,arch>::gje3SrchThrd_83, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_84, config<T,arch>::gje3SrchThrd_84, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_85, config<T,arch>::gje3SrchThrd_85, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_86, config<T,arch>::gje3SrchThrd_86, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_87, config<T,arch>::gje3SrchThrd_87, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_88, config<T,arch>::gje3SrchThrd_88, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_89, config<T,arch>::gje3SrchThrd_89, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_90, config<T,arch>::gje3SrchThrd_90, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_91, config<T,arch>::gje3SrchThrd_91, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_92, config<T,arch>::gje3SrchThrd_92, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_93, config<T,arch>::gje3SrchThrd_93, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_94, config<T,arch>::gje3SrchThrd_94, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_95, config<T,arch>::gje3SrchThrd_95, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_96, config<T,arch>::gje3SrchThrd_96, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_97, config<T,arch>::gje3SrchThrd_97, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_98, config<T,arch>::gje3SrchThrd_98, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_99, config<T,arch>::gje3SrchThrd_99, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_100,config<T,arch>::gje3SrchThrd_100,arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_101,config<T,arch>::gje3SrchThrd_101,arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_102,config<T,arch>::gje3SrchThrd_102,arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_103,config<T,arch>::gje3SrchThrd_103,arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_104,config<T,arch>::gje3SrchThrd_104,arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_105,config<T,arch>::gje3SrchThrd_105,arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_106,config<T,arch>::gje3SrchThrd_106,arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_107,config<T,arch>::gje3SrchThrd_107,arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_108,config<T,arch>::gje3SrchThrd_108,arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_109,config<T,arch>::gje3SrchThrd_109,arch>,
};
if (n < config<T,arch>::gje3MinDim || n > config<T,arch>::gje3MaxDim ||
batch < 1) {
return -1;
}
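/* Launch configuration: thread blocks of dimX[n] x n threads; the grid spans
   the batch and is split into a 2D grid once batch exceeds GRID_DIM_LIMIT. */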
dim3 dimBlock(dimX[n], n);
dim3 dimGrid;
if (batch <= GRID_DIM_LIMIT) {
dimGrid.x = batch;
dimGrid.y = 1;
dimGrid.z = 1;
} else {
dimGrid.x = GRID_DIM_LIMIT;
dimGrid.y = (batch + GRID_DIM_LIMIT-1) / GRID_DIM_LIMIT;
dimGrid.z = 1;
}
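/* Dynamic shared memory per block: the row-padded matrix itself (As), the
   per-thread values and locations kept by the srchThrd[n] search threads
   (Val/Loc, presumably for pivot selection), and the icol/perm bookkeeping
   arrays. */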
int smem_size = (sizeof(A_d[0]) * (n + padding[n]) * (n) + // As
sizeof(typename config<T,arch>::absValType) * srchThrd[n] + // Val
sizeof(int) * srchThrd[n] + // Loc
sizeof(int) * n + // icol
sizeof(int) * n); // perm
hipLaunchKernelGGL(pf[n], dim3(dimGrid), dim3(dimBlock), smem_size, 0, A_d, Ainv_d, n, batch);
/* Check synchronous errors, i.e. pre-launch */
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
return -2;
}
return 0;
}
template <typename T, int arch>
int matinv_MatPerThread (const T *A_d, T *Ainv_d, int n, int batch)
{
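/* Return values: 0 on success, -1 if n or batch is out of range, -2 if the
   cache configuration or kernel launch fails, and 1 if the caller should
   fall back to the general matinv_gje3 path (n too large for these kernels,
   or batch below the per-dimension minimum). */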
typedef void (* func)(const T *A_d, T *Ainv_d, int batch);
int minBatchSize [11] = {
0x7fffffff,
0x7fffffff,
config<T,arch>::matInv2x2MinBatch,
config<T,arch>::matInv3x3MinBatch,
config<T,arch>::matInv4x4MinBatch,
config<T,arch>::matInv5x5MinBatch,
config<T,arch>::matInv6x6MinBatch,
config<T,arch>::matInv7x7MinBatch,
config<T,arch>::matInv8x8MinBatch,
config<T,arch>::matInv9x9MinBatch,
config<T,arch>::matInv10x10MinBatch
};
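/* Batches smaller than minBatchSize[n] are not handled here; returning 1
   below makes the wrapper functions fall back to matinv_gje3 instead. */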
func pf[11] = {
0,
0,
matinv_2x2_matrix_per_thread<T,arch>,
matinv_3x3_matrix_per_thread<T,arch>,
matinv_4x4_matrix_per_thread<T,arch>,
matinv_5x5_matrix_per_thread<T,arch>,
matinv_6x6_matrix_per_thread<T,arch>,
matinv_7x7_matrix_per_thread<T,arch>,
matinv_8x8_matrix_per_thread<T,arch>,
matinv_9x9_matrix_per_thread<T,arch>,
matinv_10x10_matrix_per_thread<T,arch>
};
hipError_t err;
dim3 dimBlock(128);
dim3 dimGrid;
int numBlocks;
if (n < config<T,arch>::matInvMinDim || batch < 1) {
return -1;
}
if (n > config<T,arch>::matInvMaxDim || batch < minBatchSize[n]) {
return 1;
}
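/* For the larger matrices, prefer L1 cache over shared memory for the
   matrix-per-thread kernels; the assumption here is that these kernels keep
   their working set in registers/local memory rather than shared memory. */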
switch (n) {
case 4:
err = hipFuncSetCacheConfig (matinv_4x4_matrix_per_thread<T,arch>,
hipFuncCachePreferL1);
break;
case 5:
err = hipFuncSetCacheConfig (matinv_5x5_matrix_per_thread<T,arch>,
hipFuncCachePreferL1);
break;
case 6:
err = hipFuncSetCacheConfig (matinv_6x6_matrix_per_thread<T,arch>,
hipFuncCachePreferL1);
break;
case 7:
err = hipFuncSetCacheConfig (matinv_7x7_matrix_per_thread<T,arch>,
hipFuncCachePreferL1);
break;
case 8:
err = hipFuncSetCacheConfig (matinv_8x8_matrix_per_thread<T,arch>,
hipFuncCachePreferL1);
break;
case 9:
err = hipFuncSetCacheConfig (matinv_9x9_matrix_per_thread<T,arch>,
hipFuncCachePreferL1);
break;
case 10:
err = hipFuncSetCacheConfig (matinv_10x10_matrix_per_thread<T,arch>,
hipFuncCachePreferL1);
break;
default:
err = hipSuccess;
break;
}
if (err != hipSuccess) {
return -2;
}
numBlocks = (batch + dimBlock.x - 1) / dimBlock.x;
if (numBlocks <= GRID_DIM_LIMIT) {
dimGrid.x = numBlocks;
dimGrid.y = 1;
dimGrid.z = 1;
} else {
dimGrid.x = GRID_DIM_LIMIT;
dimGrid.y = (numBlocks + GRID_DIM_LIMIT-1) / GRID_DIM_LIMIT;
dimGrid.z = 1;
}
hipLaunchKernelGGL(pf[n], dim3(dimGrid), dim3(dimBlock), 0, 0, A_d, Ainv_d, batch);
/* Check synchronous errors, i.e. pre-launch */
err = hipGetLastError();
if (hipSuccess != err) {
return -2;
}
return 0;
}
/* C callable wrapper functions */
int smatinv_batch (float *A, float *Ainv, int n, int batch)
{
int stat;
stat = matinv_MatPerThread<float,GPU_ARCH>(A, Ainv, n, batch);
if (stat <= 0) return stat;
return matinv_gje3<float,GPU_ARCH>(A, Ainv, n, batch);
}
int dmatinv_batch (double *A, double *Ainv, int n, int batch)
{
int stat;
stat = matinv_MatPerThread<double,GPU_ARCH>(A, Ainv, n, batch);
if (stat <= 0) return stat;
return matinv_gje3<double,GPU_ARCH>(A, Ainv, n, batch);
}
int cmatinv_batch (hipComplex *A, hipComplex *Ainv, int n, int batch)
{
int stat;
stat = matinv_MatPerThread<hipComplex,GPU_ARCH>(A, Ainv, n, batch);
if (stat <= 0) return stat;
return matinv_gje3<hipComplex,GPU_ARCH>(A, Ainv, n, batch);
}
int zmatinv_batch (hipDoubleComplex *A, hipDoubleComplex *Ainv, int n, int batch)
{
int stat;
stat = matinv_MatPerThread<hipDoubleComplex,GPU_ARCH>(A, Ainv, n, batch);
if (stat <= 0) return stat;
return matinv_gje3<hipDoubleComplex,GPU_ARCH>(A, Ainv, n, batch);
}
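/*
 * Usage sketch (illustrative only; the buffer names are examples and the
 * fill step is omitted). A and Ainv must be device pointers to `batch`
 * contiguous n x n matrices:
 *
 *   float *A_d, *Ainv_d;
 *   hipMalloc((void**)&A_d,    sizeof(float) * n * n * batch);
 *   hipMalloc((void**)&Ainv_d, sizeof(float) * n * n * batch);
 *   // ... copy the input matrices into A_d ...
 *   int stat = smatinv_batch(A_d, Ainv_d, n, batch); // 0 indicates success
 */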
| 6ea77f7e4be818b34f4ea5149eeab2872bea7cb0.cu | /*
* Copyright (c) 2011-2013 NVIDIA Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of NVIDIA Corporation nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* release 1.1:
*
* o added smatinv_batch() for batched inversion of float matrices
* o added cmatinv_batch() for batched inversion of float-complex matrices
* o added special kernels for faster processing of very small matrices
* o added tuning for sm_35 to the configuration template class
*
*/
#include <stdio.h>
#include "cuComplex.h"
#include "inverse.h"
#include "operations.h"
#define GRID_DIM_LIMIT (65520)
#define ARCH_SM13 (0)
#define ARCH_SM20 (1)
#define ARCH_SM30 (2)
#define ARCH_SM35 (3)
#if defined(KEPLER2)
#define GPU_ARCH (ARCH_SM35)
#elif defined(FERMI) || defined(KEPLER1)
/* FIXME: This is a hack: instead of setting up tuning parameters for KEPLER1
platforms we simply re-use the Fermi settings. This very likely leads to
suboptimal performance.
*/
#define GPU_ARCH (ARCH_SM20)
#else
#define GPU_ARCH (ARCH_SM13)
#endif
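/* GPU_ARCH selects, at compile time, which tuned config<> specialization
   below supplies the kernel launch parameters. */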
/* Poor man's typeid */
template <typename T> __device__ int isDoubleComplex();
template <> __device__ int isDoubleComplex<float>() {return 0;};
template <> __device__ int isDoubleComplex<double>() {return 0;};
template <> __device__ int isDoubleComplex<cuComplex>() {return 0;};
template <> __device__ int isDoubleComplex<cuDoubleComplex>() {return 1;};
template <typename T, int arch>
class config {
public:
};
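/* Each specialization of config<> below holds per-dimension tuning data:
   gje3DimX_nn is the x-dimension of the thread block used for an nn x nn
   matrix, gje3Pad_nn the extra row padding applied to the matrix in shared
   memory (likely to mitigate bank conflicts), gje3SrchThrd_nn the number of
   threads participating in the pivot search, and matInvNxNMinBatch the
   minimum batch size for the matrix-per-thread kernels. */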
template<> class config<float,ARCH_SM35> {
public:
typedef float absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim =109 };
enum { gje3MinBlks = 1 };
enum { gje3MaxThrds = 2048 }; /* sm_35, 32 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 5 };
enum { gje3DimX_06 = 3 };
enum { gje3DimX_07 = 7 };
enum { gje3DimX_08 = 8 };
enum { gje3DimX_09 = 9 };
enum { gje3DimX_10 = 5 };
enum { gje3DimX_11 = 6 };
enum { gje3DimX_12 = 4 };
enum { gje3DimX_13 = 7 };
enum { gje3DimX_14 = 7 };
enum { gje3DimX_15 = 5 };
enum { gje3DimX_16 = 8 };
enum { gje3DimX_17 = 5 };
enum { gje3DimX_18 = 5 };
enum { gje3DimX_19 = 5 };
enum { gje3DimX_20 = 4 };
enum { gje3DimX_21 = 6 };
enum { gje3DimX_22 = 2 };
enum { gje3DimX_23 = 4 };
enum { gje3DimX_24 = 4 };
enum { gje3DimX_25 = 5 };
enum { gje3DimX_26 = 2 };
enum { gje3DimX_27 = 4 };
enum { gje3DimX_28 = 4 };
enum { gje3DimX_29 = 5 };
enum { gje3DimX_30 = 5 };
enum { gje3DimX_31 = 4 };
enum { gje3DimX_32 = 4 };
enum { gje3DimX_33 = 3 };
enum { gje3DimX_34 = 6 };
enum { gje3DimX_35 = 3 };
enum { gje3DimX_36 = 4 };
enum { gje3DimX_37 = 5 };
enum { gje3DimX_38 = 6 };
enum { gje3DimX_39 = 7 };
enum { gje3DimX_40 = 7 };
enum { gje3DimX_41 = 6 };
enum { gje3DimX_42 = 6 };
enum { gje3DimX_43 = 6 };
enum { gje3DimX_44 = 4 };
enum { gje3DimX_45 = 7 };
enum { gje3DimX_46 = 8 };
enum { gje3DimX_47 = 8 };
enum { gje3DimX_48 = 8 };
enum { gje3DimX_49 = 10 };
enum { gje3DimX_50 = 10 };
enum { gje3DimX_51 = 8 };
enum { gje3DimX_52 = 4 };
enum { gje3DimX_53 = 9 };
enum { gje3DimX_54 = 7 };
enum { gje3DimX_55 = 11 };
enum { gje3DimX_56 = 8 };
enum { gje3DimX_57 = 10 };
enum { gje3DimX_58 = 10 };
enum { gje3DimX_59 = 9 };
enum { gje3DimX_60 = 8 };
enum { gje3DimX_61 = 6 };
enum { gje3DimX_62 = 6 };
enum { gje3DimX_63 = 11 };
enum { gje3DimX_64 = 12 };
enum { gje3DimX_65 = 12 };
enum { gje3DimX_66 = 11 };
enum { gje3DimX_67 = 8 };
enum { gje3DimX_68 = 8 };
enum { gje3DimX_69 = 5 };
enum { gje3DimX_70 = 6 };
enum { gje3DimX_71 = 7 };
enum { gje3DimX_72 = 9 };
enum { gje3DimX_73 = 9 };
enum { gje3DimX_74 = 10 };
enum { gje3DimX_75 = 11 };
enum { gje3DimX_76 = 12 };
enum { gje3DimX_77 = 9 };
enum { gje3DimX_78 = 12 };
enum { gje3DimX_79 = 12 };
enum { gje3DimX_80 = 10 };
enum { gje3DimX_81 = 12 };
enum { gje3DimX_82 = 12 };
enum { gje3DimX_83 = 12 };
enum { gje3DimX_84 = 12 };
enum { gje3DimX_85 = 11 };
enum { gje3DimX_86 = 11 };
enum { gje3DimX_87 = 11 };
enum { gje3DimX_88 = 11 };
enum { gje3DimX_89 = 10 };
enum { gje3DimX_90 = 10 };
enum { gje3DimX_91 = 11 };
enum { gje3DimX_92 = 11 };
enum { gje3DimX_93 = 11 };
enum { gje3DimX_94 = 10 };
enum { gje3DimX_95 = 9 };
enum { gje3DimX_96 = 8 };
enum { gje3DimX_97 = 10 };
enum { gje3DimX_98 = 10 };
enum { gje3DimX_99 = 9 };
enum { gje3DimX_100 = 10 };
enum { gje3DimX_101 = 10 };
enum { gje3DimX_102 = 6 };
enum { gje3DimX_103 = 7 };
enum { gje3DimX_104 = 9 };
enum { gje3DimX_105 = 9 };
enum { gje3DimX_106 = 9 };
enum { gje3DimX_107 = 9 };
enum { gje3DimX_108 = 9 };
enum { gje3DimX_109 = 9 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 0 };
enum { gje3Pad_03 = 0 };
enum { gje3Pad_04 = 0 };
enum { gje3Pad_05 = 0 };
enum { gje3Pad_06 = 0 };
enum { gje3Pad_07 = 0 };
enum { gje3Pad_08 = 1 };
enum { gje3Pad_09 = 5 };
enum { gje3Pad_10 = 0 };
enum { gje3Pad_11 = 1 };
enum { gje3Pad_12 = 0 };
enum { gje3Pad_13 = 1 };
enum { gje3Pad_14 = 1 };
enum { gje3Pad_15 = 5 };
enum { gje3Pad_16 = 1 };
enum { gje3Pad_17 = 3 };
enum { gje3Pad_18 = 5 };
enum { gje3Pad_19 = 4 };
enum { gje3Pad_20 = 0 };
enum { gje3Pad_21 = 5 };
enum { gje3Pad_22 = 4 };
enum { gje3Pad_23 = 5 };
enum { gje3Pad_24 = 4 };
enum { gje3Pad_25 = 2 };
enum { gje3Pad_26 = 0 };
enum { gje3Pad_27 = 1 };
enum { gje3Pad_28 = 0 };
enum { gje3Pad_29 = 1 };
enum { gje3Pad_30 = 4 };
enum { gje3Pad_31 = 5 };
enum { gje3Pad_32 = 4 };
enum { gje3Pad_33 = 2 };
enum { gje3Pad_34 = 4 };
enum { gje3Pad_35 = 0 };
enum { gje3Pad_36 = 0 };
enum { gje3Pad_37 = 0 };
enum { gje3Pad_38 = 0 };
enum { gje3Pad_39 = 0 };
enum { gje3Pad_40 = 1 };
enum { gje3Pad_41 = 4 };
enum { gje3Pad_42 = 3 };
enum { gje3Pad_43 = 2 };
enum { gje3Pad_44 = 0 };
enum { gje3Pad_45 = 1 };
enum { gje3Pad_46 = 4 };
enum { gje3Pad_47 = 2 };
enum { gje3Pad_48 = 4 };
enum { gje3Pad_49 = 4 };
enum { gje3Pad_50 = 3 };
enum { gje3Pad_51 = 5 };
enum { gje3Pad_52 = 0 };
enum { gje3Pad_53 = 0 };
enum { gje3Pad_54 = 0 };
enum { gje3Pad_55 = 4 };
enum { gje3Pad_56 = 1 };
enum { gje3Pad_57 = 1 };
enum { gje3Pad_58 = 1 };
enum { gje3Pad_59 = 0 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 0 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 5 };
enum { gje3Pad_64 = 5 };
enum { gje3Pad_65 = 5 };
enum { gje3Pad_66 = 4 };
enum { gje3Pad_67 = 5 };
enum { gje3Pad_68 = 4 };
enum { gje3Pad_69 = 0 };
enum { gje3Pad_70 = 0 };
enum { gje3Pad_71 = 0 };
enum { gje3Pad_72 = 1 };
enum { gje3Pad_73 = 0 };
enum { gje3Pad_74 = 0 };
enum { gje3Pad_75 = 0 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 5 };
enum { gje3Pad_79 = 5 };
enum { gje3Pad_80 = 5 };
enum { gje3Pad_81 = 5 };
enum { gje3Pad_82 = 3 };
enum { gje3Pad_83 = 2 };
enum { gje3Pad_84 = 2 };
enum { gje3Pad_85 = 3 };
enum { gje3Pad_86 = 1 };
enum { gje3Pad_87 = 2 };
enum { gje3Pad_88 = 1 };
enum { gje3Pad_89 = 1 };
enum { gje3Pad_90 = 1 };
enum { gje3Pad_91 = 0 };
enum { gje3Pad_92 = 0 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 0 };
enum { gje3Pad_95 = 5 };
enum { gje3Pad_96 = 4 };
enum { gje3Pad_97 = 5 };
enum { gje3Pad_98 = 5 };
enum { gje3Pad_99 = 2 };
enum { gje3Pad_100 = 5 };
enum { gje3Pad_101 = 5 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 0 };
enum { gje3Pad_104 = 1 };
enum { gje3Pad_105 = 0 };
enum { gje3Pad_106 = 2 };
enum { gje3Pad_107 = 2 };
enum { gje3Pad_108 = 2 };
enum { gje3Pad_109 = 1 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 1 };
enum { gje3SrchThrd_03 = 1 };
enum { gje3SrchThrd_04 = 1 };
enum { gje3SrchThrd_05 = 1 };
enum { gje3SrchThrd_06 = 1 };
enum { gje3SrchThrd_07 = 1 };
enum { gje3SrchThrd_08 = 2 };
enum { gje3SrchThrd_09 = 2 };
enum { gje3SrchThrd_10 = 2 };
enum { gje3SrchThrd_11 = 2 };
enum { gje3SrchThrd_12 = 2 };
enum { gje3SrchThrd_13 = 2 };
enum { gje3SrchThrd_14 = 2 };
enum { gje3SrchThrd_15 = 2 };
enum { gje3SrchThrd_16 = 2 };
enum { gje3SrchThrd_17 = 2 };
enum { gje3SrchThrd_18 = 2 };
enum { gje3SrchThrd_19 = 2 };
enum { gje3SrchThrd_20 = 2 };
enum { gje3SrchThrd_21 = 2 };
enum { gje3SrchThrd_22 = 2 };
enum { gje3SrchThrd_23 = 2 };
enum { gje3SrchThrd_24 = 2 };
enum { gje3SrchThrd_25 = 2 };
enum { gje3SrchThrd_26 = 2 };
enum { gje3SrchThrd_27 = 2 };
enum { gje3SrchThrd_28 = 2 };
enum { gje3SrchThrd_29 = 2 };
enum { gje3SrchThrd_30 = 2 };
enum { gje3SrchThrd_31 = 2 };
enum { gje3SrchThrd_32 = 2 };
enum { gje3SrchThrd_33 = 2 };
enum { gje3SrchThrd_34 = 2 };
enum { gje3SrchThrd_35 = 2 };
enum { gje3SrchThrd_36 = 2 };
enum { gje3SrchThrd_37 = 2 };
enum { gje3SrchThrd_38 = 2 };
enum { gje3SrchThrd_39 = 2 };
enum { gje3SrchThrd_40 = 2 };
enum { gje3SrchThrd_41 = 2 };
enum { gje3SrchThrd_42 = 2 };
enum { gje3SrchThrd_43 = 2 };
enum { gje3SrchThrd_44 = 2 };
enum { gje3SrchThrd_45 = 2 };
enum { gje3SrchThrd_46 = 2 };
enum { gje3SrchThrd_47 = 2 };
enum { gje3SrchThrd_48 = 2 };
enum { gje3SrchThrd_49 = 4 };
enum { gje3SrchThrd_50 = 4 };
enum { gje3SrchThrd_51 = 4 };
enum { gje3SrchThrd_52 = 4 };
enum { gje3SrchThrd_53 = 4 };
enum { gje3SrchThrd_54 = 4 };
enum { gje3SrchThrd_55 = 4 };
enum { gje3SrchThrd_56 = 4 };
enum { gje3SrchThrd_57 = 4 };
enum { gje3SrchThrd_58 = 4 };
enum { gje3SrchThrd_59 = 4 };
enum { gje3SrchThrd_60 = 4 };
enum { gje3SrchThrd_61 = 4 };
enum { gje3SrchThrd_62 = 4 };
enum { gje3SrchThrd_63 = 4 };
enum { gje3SrchThrd_64 = 4 };
enum { gje3SrchThrd_65 = 4 };
enum { gje3SrchThrd_66 = 4 };
enum { gje3SrchThrd_67 = 4 };
enum { gje3SrchThrd_68 = 4 };
enum { gje3SrchThrd_69 = 4 };
enum { gje3SrchThrd_70 = 4 };
enum { gje3SrchThrd_71 = 4 };
enum { gje3SrchThrd_72 = 4 };
enum { gje3SrchThrd_73 = 4 };
enum { gje3SrchThrd_74 = 4 };
enum { gje3SrchThrd_75 = 4 };
enum { gje3SrchThrd_76 = 4 };
enum { gje3SrchThrd_77 = 4 };
enum { gje3SrchThrd_78 = 4 };
enum { gje3SrchThrd_79 = 4 };
enum { gje3SrchThrd_80 = 4 };
enum { gje3SrchThrd_81 = 4 };
enum { gje3SrchThrd_82 = 4 };
enum { gje3SrchThrd_83 = 4 };
enum { gje3SrchThrd_84 = 4 };
enum { gje3SrchThrd_85 = 4 };
enum { gje3SrchThrd_86 = 4 };
enum { gje3SrchThrd_87 = 4 };
enum { gje3SrchThrd_88 = 4 };
enum { gje3SrchThrd_89 = 4 };
enum { gje3SrchThrd_90 = 4 };
enum { gje3SrchThrd_91 = 4 };
enum { gje3SrchThrd_92 = 4 };
enum { gje3SrchThrd_93 = 4 };
enum { gje3SrchThrd_94 = 4 };
enum { gje3SrchThrd_95 = 4 };
enum { gje3SrchThrd_96 = 4 };
enum { gje3SrchThrd_97 = 4 };
enum { gje3SrchThrd_98 = 4 };
enum { gje3SrchThrd_99 = 4 };
enum { gje3SrchThrd_100 = 4 };
enum { gje3SrchThrd_101 = 4 };
enum { gje3SrchThrd_102 = 4 };
enum { gje3SrchThrd_103 = 4 };
enum { gje3SrchThrd_104 = 4 };
enum { gje3SrchThrd_105 = 4 };
enum { gje3SrchThrd_106 = 4 };
enum { gje3SrchThrd_107 = 4 };
enum { gje3SrchThrd_108 = 4 };
enum { gje3SrchThrd_109 = 4 };
enum { matInv2x2MinBatch = 1200 };
enum { matInv3x3MinBatch = 1000 };
enum { matInv4x4MinBatch = 900 };
enum { matInv5x5MinBatch = 900 };
enum { matInv6x6MinBatch = 900 };
enum { matInv7x7MinBatch = 1000 };
enum { matInv8x8MinBatch = 1000 };
enum { matInv9x9MinBatch = 1000 };
enum { matInv10x10MinBatch= 1000 };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 10 };
};
template<> class config<double,ARCH_SM35> {
public:
typedef double absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim = 77 };
enum { gje3MinBlks = 1 };
enum { gje3MaxThrds = 2048 }; /* sm_35, 32 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 5 };
enum { gje3DimX_06 = 6 };
enum { gje3DimX_07 = 7 };
enum { gje3DimX_08 = 8 };
enum { gje3DimX_09 = 9 };
enum { gje3DimX_10 = 5 };
enum { gje3DimX_11 = 11 };
enum { gje3DimX_12 = 8 };
enum { gje3DimX_13 = 7 };
enum { gje3DimX_14 = 7 };
enum { gje3DimX_15 = 5 };
enum { gje3DimX_16 = 8 };
enum { gje3DimX_17 = 5 };
enum { gje3DimX_18 = 5 };
enum { gje3DimX_19 = 5 };
enum { gje3DimX_20 = 8 };
enum { gje3DimX_21 = 7 };
enum { gje3DimX_22 = 4 };
enum { gje3DimX_23 = 8 };
enum { gje3DimX_24 = 8 };
enum { gje3DimX_25 = 5 };
enum { gje3DimX_26 = 6 };
enum { gje3DimX_27 = 7 };
enum { gje3DimX_28 = 8 };
enum { gje3DimX_29 = 8 };
enum { gje3DimX_30 = 6 };
enum { gje3DimX_31 = 8 };
enum { gje3DimX_32 = 8 };
enum { gje3DimX_33 = 11 };
enum { gje3DimX_34 = 6 };
enum { gje3DimX_35 = 9 };
enum { gje3DimX_36 = 8 };
enum { gje3DimX_37 = 10 };
enum { gje3DimX_38 = 10 };
enum { gje3DimX_39 = 8 };
enum { gje3DimX_40 = 10 };
enum { gje3DimX_41 = 9 };
enum { gje3DimX_42 = 10 };
enum { gje3DimX_43 = 9 };
enum { gje3DimX_44 = 8 };
enum { gje3DimX_45 = 9 };
enum { gje3DimX_46 = 12 };
enum { gje3DimX_47 = 12 };
enum { gje3DimX_48 = 12 };
enum { gje3DimX_49 = 12 };
enum { gje3DimX_50 = 12 };
enum { gje3DimX_51 = 9 };
enum { gje3DimX_52 = 8 };
enum { gje3DimX_53 = 11 };
enum { gje3DimX_54 = 12 };
enum { gje3DimX_55 = 11 };
enum { gje3DimX_56 = 12 };
enum { gje3DimX_57 = 11 };
enum { gje3DimX_58 = 12 };
enum { gje3DimX_59 = 12 };
enum { gje3DimX_60 = 12 };
enum { gje3DimX_61 = 12 };
enum { gje3DimX_62 = 12 };
enum { gje3DimX_63 = 11 };
enum { gje3DimX_64 = 12 };
enum { gje3DimX_65 = 12 };
enum { gje3DimX_66 = 12 };
enum { gje3DimX_67 = 12 };
enum { gje3DimX_68 = 12 };
enum { gje3DimX_69 = 12 };
enum { gje3DimX_70 = 12 };
enum { gje3DimX_71 = 9 };
enum { gje3DimX_72 = 9 };
enum { gje3DimX_73 = 11 };
enum { gje3DimX_74 = 10 };
enum { gje3DimX_75 = 11 };
enum { gje3DimX_76 = 12 };
enum { gje3DimX_77 = 11 };
enum { gje3DimX_78 = -1 };
enum { gje3DimX_79 = -1 };
enum { gje3DimX_80 = -1 };
enum { gje3DimX_81 = -1 };
enum { gje3DimX_82 = -1 };
enum { gje3DimX_83 = -1 };
enum { gje3DimX_84 = -1 };
enum { gje3DimX_85 = -1 };
enum { gje3DimX_86 = -1 };
enum { gje3DimX_87 = -1 };
enum { gje3DimX_88 = -1 };
enum { gje3DimX_89 = -1 };
enum { gje3DimX_90 = -1 };
enum { gje3DimX_91 = -1 };
enum { gje3DimX_92 = -1 };
enum { gje3DimX_93 = -1 };
enum { gje3DimX_94 = -1 };
enum { gje3DimX_95 = -1 };
enum { gje3DimX_96 = -1 };
enum { gje3DimX_97 = -1 };
enum { gje3DimX_98 = -1 };
enum { gje3DimX_99 = -1 };
enum { gje3DimX_100 = -1 };
enum { gje3DimX_101 = -1 };
enum { gje3DimX_102 = -1 };
enum { gje3DimX_103 = -1 };
enum { gje3DimX_104 = -1 };
enum { gje3DimX_105 = -1 };
enum { gje3DimX_106 = -1 };
enum { gje3DimX_107 = -1 };
enum { gje3DimX_108 = -1 };
enum { gje3DimX_109 = -1 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 0 };
enum { gje3Pad_03 = 0 };
enum { gje3Pad_04 = 0 };
enum { gje3Pad_05 = 0 };
enum { gje3Pad_06 = 0 };
enum { gje3Pad_07 = 0 };
enum { gje3Pad_08 = 1 };
enum { gje3Pad_09 = 0 };
enum { gje3Pad_10 = 0 };
enum { gje3Pad_11 = 0 };
enum { gje3Pad_12 = 0 };
enum { gje3Pad_13 = 0 };
enum { gje3Pad_14 = 0 };
enum { gje3Pad_15 = 3 };
enum { gje3Pad_16 = 4 };
enum { gje3Pad_17 = 1 };
enum { gje3Pad_18 = 0 };
enum { gje3Pad_19 = 0 };
enum { gje3Pad_20 = 0 };
enum { gje3Pad_21 = 2 };
enum { gje3Pad_22 = 0 };
enum { gje3Pad_23 = 2 };
enum { gje3Pad_24 = 1 };
enum { gje3Pad_25 = 0 };
enum { gje3Pad_26 = 0 };
enum { gje3Pad_27 = 0 };
enum { gje3Pad_28 = 0 };
enum { gje3Pad_29 = 0 };
enum { gje3Pad_30 = 0 };
enum { gje3Pad_31 = 5 };
enum { gje3Pad_32 = 4 };
enum { gje3Pad_33 = 2 };
enum { gje3Pad_34 = 0 };
enum { gje3Pad_35 = 2 };
enum { gje3Pad_36 = 0 };
enum { gje3Pad_37 = 1 };
enum { gje3Pad_38 = 0 };
enum { gje3Pad_39 = 5 };
enum { gje3Pad_40 = 2 };
enum { gje3Pad_41 = 0 };
enum { gje3Pad_42 = 0 };
enum { gje3Pad_43 = 2 };
enum { gje3Pad_44 = 0 };
enum { gje3Pad_45 = 1 };
enum { gje3Pad_46 = 0 };
enum { gje3Pad_47 = 5 };
enum { gje3Pad_48 = 5 };
enum { gje3Pad_49 = 5 };
enum { gje3Pad_50 = 4 };
enum { gje3Pad_51 = 2 };
enum { gje3Pad_52 = 0 };
enum { gje3Pad_53 = 0 };
enum { gje3Pad_54 = 0 };
enum { gje3Pad_55 = 4 };
enum { gje3Pad_56 = 4 };
enum { gje3Pad_57 = 2 };
enum { gje3Pad_58 = 2 };
enum { gje3Pad_59 = 1 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 1 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 5 };
enum { gje3Pad_64 = 5 };
enum { gje3Pad_65 = 4 };
enum { gje3Pad_66 = 4 };
enum { gje3Pad_67 = 3 };
enum { gje3Pad_68 = 2 };
enum { gje3Pad_69 = 1 };
enum { gje3Pad_70 = 0 };
enum { gje3Pad_71 = 2 };
enum { gje3Pad_72 = 1 };
enum { gje3Pad_73 = 2 };
enum { gje3Pad_74 = 0 };
enum { gje3Pad_75 = 0 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 0 };
enum { gje3Pad_79 = 0 };
enum { gje3Pad_80 = 0 };
enum { gje3Pad_81 = 0 };
enum { gje3Pad_82 = 0 };
enum { gje3Pad_83 = 0 };
enum { gje3Pad_84 = 0 };
enum { gje3Pad_85 = 0 };
enum { gje3Pad_86 = 0 };
enum { gje3Pad_87 = 0 };
enum { gje3Pad_88 = 0 };
enum { gje3Pad_89 = 0 };
enum { gje3Pad_90 = 0 };
enum { gje3Pad_91 = 0 };
enum { gje3Pad_92 = 0 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 0 };
enum { gje3Pad_95 = 0 };
enum { gje3Pad_96 = 0 };
enum { gje3Pad_97 = 0 };
enum { gje3Pad_98 = 0 };
enum { gje3Pad_99 = 0 };
enum { gje3Pad_100 = 0 };
enum { gje3Pad_101 = 0 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 0 };
enum { gje3Pad_104 = 0 };
enum { gje3Pad_105 = 0 };
enum { gje3Pad_106 = 0 };
enum { gje3Pad_107 = 0 };
enum { gje3Pad_108 = 0 };
enum { gje3Pad_109 = 0 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 1 };
enum { gje3SrchThrd_03 = 2 };
enum { gje3SrchThrd_04 = 2 };
enum { gje3SrchThrd_05 = 2 };
enum { gje3SrchThrd_06 = 2 };
enum { gje3SrchThrd_07 = 2 };
enum { gje3SrchThrd_08 = 2 };
enum { gje3SrchThrd_09 = 2 };
enum { gje3SrchThrd_10 = 2 };
enum { gje3SrchThrd_11 = 2 };
enum { gje3SrchThrd_12 = 2 };
enum { gje3SrchThrd_13 = 2 };
enum { gje3SrchThrd_14 = 2 };
enum { gje3SrchThrd_15 = 2 };
enum { gje3SrchThrd_16 = 2 };
enum { gje3SrchThrd_17 = 2 };
enum { gje3SrchThrd_18 = 2 };
enum { gje3SrchThrd_19 = 2 };
enum { gje3SrchThrd_20 = 3 };
enum { gje3SrchThrd_21 = 3 };
enum { gje3SrchThrd_22 = 3 };
enum { gje3SrchThrd_23 = 3 };
enum { gje3SrchThrd_24 = 3 };
enum { gje3SrchThrd_25 = 3 };
enum { gje3SrchThrd_26 = 3 };
enum { gje3SrchThrd_27 = 3 };
enum { gje3SrchThrd_28 = 3 };
enum { gje3SrchThrd_29 = 3 };
enum { gje3SrchThrd_30 = 3 };
enum { gje3SrchThrd_31 = 3 };
enum { gje3SrchThrd_32 = 3 };
enum { gje3SrchThrd_33 = 3 };
enum { gje3SrchThrd_34 = 3 };
enum { gje3SrchThrd_35 = 3 };
enum { gje3SrchThrd_36 = 3 };
enum { gje3SrchThrd_37 = 3 };
enum { gje3SrchThrd_38 = 3 };
enum { gje3SrchThrd_39 = 3 };
enum { gje3SrchThrd_40 = 3 };
enum { gje3SrchThrd_41 = 5 };
enum { gje3SrchThrd_42 = 5 };
enum { gje3SrchThrd_43 = 5 };
enum { gje3SrchThrd_44 = 5 };
enum { gje3SrchThrd_45 = 5 };
enum { gje3SrchThrd_46 = 5 };
enum { gje3SrchThrd_47 = 5 };
enum { gje3SrchThrd_48 = 5 };
enum { gje3SrchThrd_49 = 5 };
enum { gje3SrchThrd_50 = 5 };
enum { gje3SrchThrd_51 = 5 };
enum { gje3SrchThrd_52 = 5 };
enum { gje3SrchThrd_53 = 5 };
enum { gje3SrchThrd_54 = 5 };
enum { gje3SrchThrd_55 = 5 };
enum { gje3SrchThrd_56 = 5 };
enum { gje3SrchThrd_57 = 5 };
enum { gje3SrchThrd_58 = 5 };
enum { gje3SrchThrd_59 = 5 };
enum { gje3SrchThrd_60 = 5 };
enum { gje3SrchThrd_61 = 5 };
enum { gje3SrchThrd_62 = 5 };
enum { gje3SrchThrd_63 = 5 };
enum { gje3SrchThrd_64 = 5 };
enum { gje3SrchThrd_65 = 5 };
enum { gje3SrchThrd_66 = 5 };
enum { gje3SrchThrd_67 = 5 };
enum { gje3SrchThrd_68 = 5 };
enum { gje3SrchThrd_69 = 5 };
enum { gje3SrchThrd_70 = 5 };
enum { gje3SrchThrd_71 = 5 };
enum { gje3SrchThrd_72 = 5 };
enum { gje3SrchThrd_73 = 5 };
enum { gje3SrchThrd_74 = 5 };
enum { gje3SrchThrd_75 = 5 };
enum { gje3SrchThrd_76 = 5 };
enum { gje3SrchThrd_77 = 5 };
enum { gje3SrchThrd_78 = -1 };
enum { gje3SrchThrd_79 = -1 };
enum { gje3SrchThrd_80 = -1 };
enum { gje3SrchThrd_81 = -1 };
enum { gje3SrchThrd_82 = -1 };
enum { gje3SrchThrd_83 = -1 };
enum { gje3SrchThrd_84 = -1 };
enum { gje3SrchThrd_85 = -1 };
enum { gje3SrchThrd_86 = -1 };
enum { gje3SrchThrd_87 = -1 };
enum { gje3SrchThrd_88 = -1 };
enum { gje3SrchThrd_89 = -1 };
enum { gje3SrchThrd_90 = -1 };
enum { gje3SrchThrd_91 = -1 };
enum { gje3SrchThrd_92 = -1 };
enum { gje3SrchThrd_93 = -1 };
enum { gje3SrchThrd_94 = -1 };
enum { gje3SrchThrd_95 = -1 };
enum { gje3SrchThrd_96 = -1 };
enum { gje3SrchThrd_97 = -1 };
enum { gje3SrchThrd_98 = -1 };
enum { gje3SrchThrd_99 = -1 };
enum { gje3SrchThrd_100 = -1 };
enum { gje3SrchThrd_101 = -1 };
enum { gje3SrchThrd_102 = -1 };
enum { gje3SrchThrd_103 = -1 };
enum { gje3SrchThrd_104 = -1 };
enum { gje3SrchThrd_105 = -1 };
enum { gje3SrchThrd_106 = -1 };
enum { gje3SrchThrd_107 = -1 };
enum { gje3SrchThrd_108 = -1 };
enum { gje3SrchThrd_109 = -1 };
enum { matInv2x2MinBatch = 1300 };
enum { matInv3x3MinBatch = 1100 };
enum { matInv4x4MinBatch = 1100 };
enum { matInv5x5MinBatch = 1100 };
enum { matInv6x6MinBatch = 1100 };
enum { matInv7x7MinBatch = 1100 };
enum { matInv8x8MinBatch = 1100 };
enum { matInv9x9MinBatch = 1100 };
enum { matInv10x10MinBatch= 1200 };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 10 };
};
template<> class config<cuComplex,ARCH_SM35> {
public:
typedef float absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim = 77 };
enum { gje3MinBlks = 1 };
enum { gje3MaxThrds = 2048 }; /* sm_35, 32 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 5 };
enum { gje3DimX_06 = 6 };
enum { gje3DimX_07 = 7 };
enum { gje3DimX_08 = 8 };
enum { gje3DimX_09 = 9 };
enum { gje3DimX_10 = 10 };
enum { gje3DimX_11 = 11 };
enum { gje3DimX_12 = 8 };
enum { gje3DimX_13 = 7 };
enum { gje3DimX_14 = 7 };
enum { gje3DimX_15 = 5 };
enum { gje3DimX_16 = 8 };
enum { gje3DimX_17 = 5 };
enum { gje3DimX_18 = 5 };
enum { gje3DimX_19 = 5 };
enum { gje3DimX_20 = 8 };
enum { gje3DimX_21 = 6 };
enum { gje3DimX_22 = 4 };
enum { gje3DimX_23 = 8 };
enum { gje3DimX_24 = 8 };
enum { gje3DimX_25 = 5 };
enum { gje3DimX_26 = 6 };
enum { gje3DimX_27 = 7 };
enum { gje3DimX_28 = 8 };
enum { gje3DimX_29 = 8 };
enum { gje3DimX_30 = 6 };
enum { gje3DimX_31 = 8 };
enum { gje3DimX_32 = 8 };
enum { gje3DimX_33 = 11 };
enum { gje3DimX_34 = 6 };
enum { gje3DimX_35 = 7 };
enum { gje3DimX_36 = 8 };
enum { gje3DimX_37 = 10 };
enum { gje3DimX_38 = 10 };
enum { gje3DimX_39 = 8 };
enum { gje3DimX_40 = 10 };
enum { gje3DimX_41 = 9 };
enum { gje3DimX_42 = 10 };
enum { gje3DimX_43 = 11 };
enum { gje3DimX_44 = 8 };
enum { gje3DimX_45 = 9 };
enum { gje3DimX_46 = 12 };
enum { gje3DimX_47 = 12 };
enum { gje3DimX_48 = 12 };
enum { gje3DimX_49 = 12 };
enum { gje3DimX_50 = 10 };
enum { gje3DimX_51 = 9 };
enum { gje3DimX_52 = 8 };
enum { gje3DimX_53 = 11 };
enum { gje3DimX_54 = 12 };
enum { gje3DimX_55 = 11 };
enum { gje3DimX_56 = 12 };
enum { gje3DimX_57 = 12 };
enum { gje3DimX_58 = 12 };
enum { gje3DimX_59 = 12 };
enum { gje3DimX_60 = 12 };
enum { gje3DimX_61 = 12 };
enum { gje3DimX_62 = 12 };
enum { gje3DimX_63 = 11 };
enum { gje3DimX_64 = 12 };
enum { gje3DimX_65 = 12 };
enum { gje3DimX_66 = 11 };
enum { gje3DimX_67 = 12 };
enum { gje3DimX_68 = 12 };
enum { gje3DimX_69 = 12 };
enum { gje3DimX_70 = 12 };
enum { gje3DimX_71 = 12 };
enum { gje3DimX_72 = 12 };
enum { gje3DimX_73 = 11 };
enum { gje3DimX_74 = 10 };
enum { gje3DimX_75 = 11 };
enum { gje3DimX_76 = 12 };
enum { gje3DimX_77 = 10 };
enum { gje3DimX_78 = -1 };
enum { gje3DimX_79 = -1 };
enum { gje3DimX_80 = -1 };
enum { gje3DimX_81 = -1 };
enum { gje3DimX_82 = -1 };
enum { gje3DimX_83 = -1 };
enum { gje3DimX_84 = -1 };
enum { gje3DimX_85 = -1 };
enum { gje3DimX_86 = -1 };
enum { gje3DimX_87 = -1 };
enum { gje3DimX_88 = -1 };
enum { gje3DimX_89 = -1 };
enum { gje3DimX_90 = -1 };
enum { gje3DimX_91 = -1 };
enum { gje3DimX_92 = -1 };
enum { gje3DimX_93 = -1 };
enum { gje3DimX_94 = -1 };
enum { gje3DimX_95 = -1 };
enum { gje3DimX_96 = -1 };
enum { gje3DimX_97 = -1 };
enum { gje3DimX_98 = -1 };
enum { gje3DimX_99 = -1 };
enum { gje3DimX_100 = -1 };
enum { gje3DimX_101 = -1 };
enum { gje3DimX_102 = -1 };
enum { gje3DimX_103 = -1 };
enum { gje3DimX_104 = -1 };
enum { gje3DimX_105 = -1 };
enum { gje3DimX_106 = -1 };
enum { gje3DimX_107 = -1 };
enum { gje3DimX_108 = -1 };
enum { gje3DimX_109 = -1 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 0 };
enum { gje3Pad_03 = 0 };
enum { gje3Pad_04 = 0 };
enum { gje3Pad_05 = 0 };
enum { gje3Pad_06 = 0 };
enum { gje3Pad_07 = 0 };
enum { gje3Pad_08 = 1 };
enum { gje3Pad_09 = 0 };
enum { gje3Pad_10 = 0 };
enum { gje3Pad_11 = 0 };
enum { gje3Pad_12 = 0 };
enum { gje3Pad_13 = 1 };
enum { gje3Pad_14 = 5 };
enum { gje3Pad_15 = 3 };
enum { gje3Pad_16 = 4 };
enum { gje3Pad_17 = 1 };
enum { gje3Pad_18 = 0 };
enum { gje3Pad_19 = 0 };
enum { gje3Pad_20 = 0 };
enum { gje3Pad_21 = 1 };
enum { gje3Pad_22 = 0 };
enum { gje3Pad_23 = 2 };
enum { gje3Pad_24 = 1 };
enum { gje3Pad_25 = 0 };
enum { gje3Pad_26 = 0 };
enum { gje3Pad_27 = 0 };
enum { gje3Pad_28 = 0 };
enum { gje3Pad_29 = 0 };
enum { gje3Pad_30 = 0 };
enum { gje3Pad_31 = 5 };
enum { gje3Pad_32 = 4 };
enum { gje3Pad_33 = 2 };
enum { gje3Pad_34 = 0 };
enum { gje3Pad_35 = 4 };
enum { gje3Pad_36 = 0 };
enum { gje3Pad_37 = 0 };
enum { gje3Pad_38 = 0 };
enum { gje3Pad_39 = 5 };
enum { gje3Pad_40 = 2 };
enum { gje3Pad_41 = 0 };
enum { gje3Pad_42 = 0 };
enum { gje3Pad_43 = 0 };
enum { gje3Pad_44 = 0 };
enum { gje3Pad_45 = 1 };
enum { gje3Pad_46 = 0 };
enum { gje3Pad_47 = 2 };
enum { gje3Pad_48 = 1 };
enum { gje3Pad_49 = 5 };
enum { gje3Pad_50 = 3 };
enum { gje3Pad_51 = 2 };
enum { gje3Pad_52 = 0 };
enum { gje3Pad_53 = 0 };
enum { gje3Pad_54 = 0 };
enum { gje3Pad_55 = 4 };
enum { gje3Pad_56 = 4 };
enum { gje3Pad_57 = 3 };
enum { gje3Pad_58 = 2 };
enum { gje3Pad_59 = 1 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 1 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 0 };
enum { gje3Pad_64 = 5 };
enum { gje3Pad_65 = 4 };
enum { gje3Pad_66 = 3 };
enum { gje3Pad_67 = 3 };
enum { gje3Pad_68 = 2 };
enum { gje3Pad_69 = 1 };
enum { gje3Pad_70 = 0 };
enum { gje3Pad_71 = 5 };
enum { gje3Pad_72 = 4 };
enum { gje3Pad_73 = 2 };
enum { gje3Pad_74 = 0 };
enum { gje3Pad_75 = 0 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 0 };
enum { gje3Pad_79 = 0 };
enum { gje3Pad_80 = 0 };
enum { gje3Pad_81 = 0 };
enum { gje3Pad_82 = 0 };
enum { gje3Pad_83 = 0 };
enum { gje3Pad_84 = 0 };
enum { gje3Pad_85 = 0 };
enum { gje3Pad_86 = 0 };
enum { gje3Pad_87 = 0 };
enum { gje3Pad_88 = 0 };
enum { gje3Pad_89 = 0 };
enum { gje3Pad_90 = 0 };
enum { gje3Pad_91 = 0 };
enum { gje3Pad_92 = 0 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 0 };
enum { gje3Pad_95 = 0 };
enum { gje3Pad_96 = 0 };
enum { gje3Pad_97 = 0 };
enum { gje3Pad_98 = 0 };
enum { gje3Pad_99 = 0 };
enum { gje3Pad_100 = 0 };
enum { gje3Pad_101 = 0 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 0 };
enum { gje3Pad_104 = 0 };
enum { gje3Pad_105 = 0 };
enum { gje3Pad_106 = 0 };
enum { gje3Pad_107 = 0 };
enum { gje3Pad_108 = 0 };
enum { gje3Pad_109 = 0 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 1 };
enum { gje3SrchThrd_03 = 2 };
enum { gje3SrchThrd_04 = 2 };
enum { gje3SrchThrd_05 = 2 };
enum { gje3SrchThrd_06 = 2 };
enum { gje3SrchThrd_07 = 2 };
enum { gje3SrchThrd_08 = 2 };
enum { gje3SrchThrd_09 = 2 };
enum { gje3SrchThrd_10 = 2 };
enum { gje3SrchThrd_11 = 2 };
enum { gje3SrchThrd_12 = 2 };
enum { gje3SrchThrd_13 = 2 };
enum { gje3SrchThrd_14 = 2 };
enum { gje3SrchThrd_15 = 2 };
enum { gje3SrchThrd_16 = 3 };
enum { gje3SrchThrd_17 = 3 };
enum { gje3SrchThrd_18 = 3 };
enum { gje3SrchThrd_19 = 3 };
enum { gje3SrchThrd_20 = 3 };
enum { gje3SrchThrd_21 = 3 };
enum { gje3SrchThrd_22 = 3 };
enum { gje3SrchThrd_23 = 3 };
enum { gje3SrchThrd_24 = 3 };
enum { gje3SrchThrd_25 = 3 };
enum { gje3SrchThrd_26 = 3 };
enum { gje3SrchThrd_27 = 3 };
enum { gje3SrchThrd_28 = 3 };
enum { gje3SrchThrd_29 = 3 };
enum { gje3SrchThrd_30 = 3 };
enum { gje3SrchThrd_31 = 3 };
enum { gje3SrchThrd_32 = 3 };
enum { gje3SrchThrd_33 = 3 };
enum { gje3SrchThrd_34 = 3 };
enum { gje3SrchThrd_35 = 3 };
enum { gje3SrchThrd_36 = 3 };
enum { gje3SrchThrd_37 = 3 };
enum { gje3SrchThrd_38 = 3 };
enum { gje3SrchThrd_39 = 3 };
enum { gje3SrchThrd_40 = 3 };
enum { gje3SrchThrd_41 = 3 };
enum { gje3SrchThrd_42 = 3 };
enum { gje3SrchThrd_43 = 3 };
enum { gje3SrchThrd_44 = 5 };
enum { gje3SrchThrd_45 = 5 };
enum { gje3SrchThrd_46 = 5 };
enum { gje3SrchThrd_47 = 5 };
enum { gje3SrchThrd_48 = 5 };
enum { gje3SrchThrd_49 = 5 };
enum { gje3SrchThrd_50 = 5 };
enum { gje3SrchThrd_51 = 5 };
enum { gje3SrchThrd_52 = 5 };
enum { gje3SrchThrd_53 = 5 };
enum { gje3SrchThrd_54 = 5 };
enum { gje3SrchThrd_55 = 5 };
enum { gje3SrchThrd_56 = 5 };
enum { gje3SrchThrd_57 = 5 };
enum { gje3SrchThrd_58 = 5 };
enum { gje3SrchThrd_59 = 5 };
enum { gje3SrchThrd_60 = 5 };
enum { gje3SrchThrd_61 = 5 };
enum { gje3SrchThrd_62 = 5 };
enum { gje3SrchThrd_63 = 5 };
enum { gje3SrchThrd_64 = 5 };
enum { gje3SrchThrd_65 = 5 };
enum { gje3SrchThrd_66 = 5 };
enum { gje3SrchThrd_67 = 5 };
enum { gje3SrchThrd_68 = 5 };
enum { gje3SrchThrd_69 = 5 };
enum { gje3SrchThrd_70 = 5 };
enum { gje3SrchThrd_71 = 5 };
enum { gje3SrchThrd_72 = 5 };
enum { gje3SrchThrd_73 = 5 };
enum { gje3SrchThrd_74 = 5 };
enum { gje3SrchThrd_75 = 5 };
enum { gje3SrchThrd_76 = 5 };
enum { gje3SrchThrd_77 = 6 };
enum { gje3SrchThrd_78 = -1 };
enum { gje3SrchThrd_79 = -1 };
enum { gje3SrchThrd_80 = -1 };
enum { gje3SrchThrd_81 = -1 };
enum { gje3SrchThrd_82 = -1 };
enum { gje3SrchThrd_83 = -1 };
enum { gje3SrchThrd_84 = -1 };
enum { gje3SrchThrd_85 = -1 };
enum { gje3SrchThrd_86 = -1 };
enum { gje3SrchThrd_87 = -1 };
enum { gje3SrchThrd_88 = -1 };
enum { gje3SrchThrd_89 = -1 };
enum { gje3SrchThrd_90 = -1 };
enum { gje3SrchThrd_91 = -1 };
enum { gje3SrchThrd_92 = -1 };
enum { gje3SrchThrd_93 = -1 };
enum { gje3SrchThrd_94 = -1 };
enum { gje3SrchThrd_95 = -1 };
enum { gje3SrchThrd_96 = -1 };
enum { gje3SrchThrd_97 = -1 };
enum { gje3SrchThrd_98 = -1 };
enum { gje3SrchThrd_99 = -1 };
enum { gje3SrchThrd_100 = -1 };
enum { gje3SrchThrd_101 = -1 };
enum { gje3SrchThrd_102 = -1 };
enum { gje3SrchThrd_103 = -1 };
enum { gje3SrchThrd_104 = -1 };
enum { gje3SrchThrd_105 = -1 };
enum { gje3SrchThrd_106 = -1 };
enum { gje3SrchThrd_107 = -1 };
enum { gje3SrchThrd_108 = -1 };
enum { gje3SrchThrd_109 = -1 };
enum { matInv2x2MinBatch = 1300 };
enum { matInv3x3MinBatch = 1200 };
enum { matInv4x4MinBatch = 1100 };
enum { matInv5x5MinBatch = 1100 };
enum { matInv6x6MinBatch = 1100 };
enum { matInv7x7MinBatch = 1300 };
enum { matInv8x8MinBatch = 1400 };
enum { matInv9x9MinBatch = 1500 };
enum { matInv10x10MinBatch= 1500 };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 10 };
};
template<> class config<cuDoubleComplex,ARCH_SM35> {
public:
typedef double absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim = 55 };
enum { gje3MinBlks = 1 };
enum { gje3MaxThrds = 2048 }; /* sm_35, 32 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 5 };
enum { gje3DimX_06 = 6 };
enum { gje3DimX_07 = 7 };
enum { gje3DimX_08 = 8 };
enum { gje3DimX_09 = 9 };
enum { gje3DimX_10 = 6 };
enum { gje3DimX_11 = 11 };
enum { gje3DimX_12 = 6 };
enum { gje3DimX_13 = 7 };
enum { gje3DimX_14 = 4 };
enum { gje3DimX_15 = 8 };
enum { gje3DimX_16 = 8 };
enum { gje3DimX_17 = 9 };
enum { gje3DimX_18 = 6 };
enum { gje3DimX_19 = 8 };
enum { gje3DimX_20 = 8 };
enum { gje3DimX_21 = 7 };
enum { gje3DimX_22 = 8 };
enum { gje3DimX_23 = 8 };
enum { gje3DimX_24 = 8 };
enum { gje3DimX_25 = 9 };
enum { gje3DimX_26 = 10 };
enum { gje3DimX_27 = 8 };
enum { gje3DimX_28 = 8 };
enum { gje3DimX_29 = 8 };
enum { gje3DimX_30 = 8 };
enum { gje3DimX_31 = 8 };
enum { gje3DimX_32 = 8 };
enum { gje3DimX_33 = 11 };
enum { gje3DimX_34 = 12 };
enum { gje3DimX_35 = 12 };
enum { gje3DimX_36 = 12 };
enum { gje3DimX_37 = 10 };
enum { gje3DimX_38 = 10 };
enum { gje3DimX_39 = 11 };
enum { gje3DimX_40 = 10 };
enum { gje3DimX_41 = 12 };
enum { gje3DimX_42 = 12 };
enum { gje3DimX_43 = 11 };
enum { gje3DimX_44 = 12 };
enum { gje3DimX_45 = 12 };
enum { gje3DimX_46 = 12 };
enum { gje3DimX_47 = 12 };
enum { gje3DimX_48 = 12 };
enum { gje3DimX_49 = 10 };
enum { gje3DimX_50 = 10 };
enum { gje3DimX_51 = 11 };
enum { gje3DimX_52 = 12 };
enum { gje3DimX_53 = 12 };
enum { gje3DimX_54 = 11 };
enum { gje3DimX_55 = 11 };
enum { gje3DimX_56 = -1 };
enum { gje3DimX_57 = -1 };
enum { gje3DimX_58 = -1 };
enum { gje3DimX_59 = -1 };
enum { gje3DimX_60 = -1 };
enum { gje3DimX_61 = -1 };
enum { gje3DimX_62 = -1 };
enum { gje3DimX_63 = -1 };
enum { gje3DimX_64 = -1 };
enum { gje3DimX_65 = -1 };
enum { gje3DimX_66 = -1 };
enum { gje3DimX_67 = -1 };
enum { gje3DimX_68 = -1 };
enum { gje3DimX_69 = -1 };
enum { gje3DimX_70 = -1 };
enum { gje3DimX_71 = -1 };
enum { gje3DimX_72 = -1 };
enum { gje3DimX_73 = -1 };
enum { gje3DimX_74 = -1 };
enum { gje3DimX_75 = -1 };
enum { gje3DimX_76 = -1 };
enum { gje3DimX_77 = -1 };
enum { gje3DimX_78 = -1 };
enum { gje3DimX_79 = -1 };
enum { gje3DimX_80 = -1 };
enum { gje3DimX_81 = -1 };
enum { gje3DimX_82 = -1 };
enum { gje3DimX_83 = -1 };
enum { gje3DimX_84 = -1 };
enum { gje3DimX_85 = -1 };
enum { gje3DimX_86 = -1 };
enum { gje3DimX_87 = -1 };
enum { gje3DimX_88 = -1 };
enum { gje3DimX_89 = -1 };
enum { gje3DimX_90 = -1 };
enum { gje3DimX_91 = -1 };
enum { gje3DimX_92 = -1 };
enum { gje3DimX_93 = -1 };
enum { gje3DimX_94 = -1 };
enum { gje3DimX_95 = -1 };
enum { gje3DimX_96 = -1 };
enum { gje3DimX_97 = -1 };
enum { gje3DimX_98 = -1 };
enum { gje3DimX_99 = -1 };
enum { gje3DimX_100 = -1 };
enum { gje3DimX_101 = -1 };
enum { gje3DimX_102 = -1 };
enum { gje3DimX_103 = -1 };
enum { gje3DimX_104 = -1 };
enum { gje3DimX_105 = -1 };
enum { gje3DimX_106 = -1 };
enum { gje3DimX_107 = -1 };
enum { gje3DimX_108 = -1 };
enum { gje3DimX_109 = -1 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 0 };
enum { gje3Pad_03 = 0 };
enum { gje3Pad_04 = 0 };
enum { gje3Pad_05 = 0 };
enum { gje3Pad_06 = 0 };
enum { gje3Pad_07 = 0 };
enum { gje3Pad_08 = 1 };
enum { gje3Pad_09 = 0 };
enum { gje3Pad_10 = 0 };
enum { gje3Pad_11 = 0 };
enum { gje3Pad_12 = 2 };
enum { gje3Pad_13 = 1 };
enum { gje3Pad_14 = 0 };
enum { gje3Pad_15 = 0 };
enum { gje3Pad_16 = 1 };
enum { gje3Pad_17 = 0 };
enum { gje3Pad_18 = 0 };
enum { gje3Pad_19 = 0 };
enum { gje3Pad_20 = 0 };
enum { gje3Pad_21 = 2 };
enum { gje3Pad_22 = 0 };
enum { gje3Pad_23 = 0 };
enum { gje3Pad_24 = 3 };
enum { gje3Pad_25 = 0 };
enum { gje3Pad_26 = 0 };
enum { gje3Pad_27 = 0 };
enum { gje3Pad_28 = 0 };
enum { gje3Pad_29 = 0 };
enum { gje3Pad_30 = 0 };
enum { gje3Pad_31 = 0 };
enum { gje3Pad_32 = 3 };
enum { gje3Pad_33 = 2 };
enum { gje3Pad_34 = 2 };
enum { gje3Pad_35 = 1 };
enum { gje3Pad_36 = 0 };
enum { gje3Pad_37 = 0 };
enum { gje3Pad_38 = 0 };
enum { gje3Pad_39 = 4 };
enum { gje3Pad_40 = 2 };
enum { gje3Pad_41 = 3 };
enum { gje3Pad_42 = 2 };
enum { gje3Pad_43 = 0 };
enum { gje3Pad_44 = 0 };
enum { gje3Pad_45 = 0 };
enum { gje3Pad_46 = 0 };
enum { gje3Pad_47 = 5 };
enum { gje3Pad_48 = 4 };
enum { gje3Pad_49 = 1 };
enum { gje3Pad_50 = 0 };
enum { gje3Pad_51 = 0 };
enum { gje3Pad_52 = 0 };
enum { gje3Pad_53 = 1 };
enum { gje3Pad_54 = 1 };
enum { gje3Pad_55 = 0 };
enum { gje3Pad_56 = 0 };
enum { gje3Pad_57 = 0 };
enum { gje3Pad_58 = 0 };
enum { gje3Pad_59 = 0 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 0 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 0 };
enum { gje3Pad_64 = 0 };
enum { gje3Pad_65 = 0 };
enum { gje3Pad_66 = 0 };
enum { gje3Pad_67 = 0 };
enum { gje3Pad_68 = 0 };
enum { gje3Pad_69 = 0 };
enum { gje3Pad_70 = 0 };
enum { gje3Pad_71 = 0 };
enum { gje3Pad_72 = 0 };
enum { gje3Pad_73 = 0 };
enum { gje3Pad_74 = 0 };
enum { gje3Pad_75 = 0 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 0 };
enum { gje3Pad_79 = 0 };
enum { gje3Pad_80 = 0 };
enum { gje3Pad_81 = 0 };
enum { gje3Pad_82 = 0 };
enum { gje3Pad_83 = 0 };
enum { gje3Pad_84 = 0 };
enum { gje3Pad_85 = 0 };
enum { gje3Pad_86 = 0 };
enum { gje3Pad_87 = 0 };
enum { gje3Pad_88 = 0 };
enum { gje3Pad_89 = 0 };
enum { gje3Pad_90 = 0 };
enum { gje3Pad_91 = 0 };
enum { gje3Pad_92 = 0 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 0 };
enum { gje3Pad_95 = 0 };
enum { gje3Pad_96 = 0 };
enum { gje3Pad_97 = 0 };
enum { gje3Pad_98 = 0 };
enum { gje3Pad_99 = 0 };
enum { gje3Pad_100 = 0 };
enum { gje3Pad_101 = 0 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 0 };
enum { gje3Pad_104 = 0 };
enum { gje3Pad_105 = 0 };
enum { gje3Pad_106 = 0 };
enum { gje3Pad_107 = 0 };
enum { gje3Pad_108 = 0 };
enum { gje3Pad_109 = 0 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 1 };
enum { gje3SrchThrd_03 = 2 };
enum { gje3SrchThrd_04 = 2 };
enum { gje3SrchThrd_05 = 2 };
enum { gje3SrchThrd_06 = 2 };
enum { gje3SrchThrd_07 = 2 };
enum { gje3SrchThrd_08 = 2 };
enum { gje3SrchThrd_09 = 2 };
enum { gje3SrchThrd_10 = 2 };
enum { gje3SrchThrd_11 = 2 };
enum { gje3SrchThrd_12 = 2 };
enum { gje3SrchThrd_13 = 2 };
enum { gje3SrchThrd_14 = 2 };
enum { gje3SrchThrd_15 = 2 };
enum { gje3SrchThrd_16 = 3 };
enum { gje3SrchThrd_17 = 3 };
enum { gje3SrchThrd_18 = 3 };
enum { gje3SrchThrd_19 = 3 };
enum { gje3SrchThrd_20 = 3 };
enum { gje3SrchThrd_21 = 3 };
enum { gje3SrchThrd_22 = 3 };
enum { gje3SrchThrd_23 = 3 };
enum { gje3SrchThrd_24 = 3 };
enum { gje3SrchThrd_25 = 3 };
enum { gje3SrchThrd_26 = 3 };
enum { gje3SrchThrd_27 = 3 };
enum { gje3SrchThrd_28 = 3 };
enum { gje3SrchThrd_29 = 3 };
enum { gje3SrchThrd_30 = 3 };
enum { gje3SrchThrd_31 = 3 };
enum { gje3SrchThrd_32 = 3 };
enum { gje3SrchThrd_33 = 3 };
enum { gje3SrchThrd_34 = 3 };
enum { gje3SrchThrd_35 = 3 };
enum { gje3SrchThrd_36 = 3 };
enum { gje3SrchThrd_37 = 3 };
enum { gje3SrchThrd_38 = 3 };
enum { gje3SrchThrd_39 = 3 };
enum { gje3SrchThrd_40 = 3 };
enum { gje3SrchThrd_41 = 3 };
enum { gje3SrchThrd_42 = 3 };
enum { gje3SrchThrd_43 = 3 };
enum { gje3SrchThrd_44 = 3 };
enum { gje3SrchThrd_45 = 3 };
enum { gje3SrchThrd_46 = 3 };
enum { gje3SrchThrd_47 = 4 };
enum { gje3SrchThrd_48 = 4 };
enum { gje3SrchThrd_49 = 4 };
enum { gje3SrchThrd_50 = 4 };
enum { gje3SrchThrd_51 = 4 };
enum { gje3SrchThrd_52 = 4 };
enum { gje3SrchThrd_53 = 4 };
enum { gje3SrchThrd_54 = 4 };
enum { gje3SrchThrd_55 = 4 };
enum { gje3SrchThrd_56 = -1 };
enum { gje3SrchThrd_57 = -1 };
enum { gje3SrchThrd_58 = -1 };
enum { gje3SrchThrd_59 = -1 };
enum { gje3SrchThrd_60 = -1 };
enum { gje3SrchThrd_61 = -1 };
enum { gje3SrchThrd_62 = -1 };
enum { gje3SrchThrd_63 = -1 };
enum { gje3SrchThrd_64 = -1 };
enum { gje3SrchThrd_65 = -1 };
enum { gje3SrchThrd_66 = -1 };
enum { gje3SrchThrd_67 = -1 };
enum { gje3SrchThrd_68 = -1 };
enum { gje3SrchThrd_69 = -1 };
enum { gje3SrchThrd_70 = -1 };
enum { gje3SrchThrd_71 = -1 };
enum { gje3SrchThrd_72 = -1 };
enum { gje3SrchThrd_73 = -1 };
enum { gje3SrchThrd_74 = -1 };
enum { gje3SrchThrd_75 = -1 };
enum { gje3SrchThrd_76 = -1 };
enum { gje3SrchThrd_77 = -1 };
enum { gje3SrchThrd_78 = -1 };
enum { gje3SrchThrd_79 = -1 };
enum { gje3SrchThrd_80 = -1 };
enum { gje3SrchThrd_81 = -1 };
enum { gje3SrchThrd_82 = -1 };
enum { gje3SrchThrd_83 = -1 };
enum { gje3SrchThrd_84 = -1 };
enum { gje3SrchThrd_85 = -1 };
enum { gje3SrchThrd_86 = -1 };
enum { gje3SrchThrd_87 = -1 };
enum { gje3SrchThrd_88 = -1 };
enum { gje3SrchThrd_89 = -1 };
enum { gje3SrchThrd_90 = -1 };
enum { gje3SrchThrd_91 = -1 };
enum { gje3SrchThrd_92 = -1 };
enum { gje3SrchThrd_93 = -1 };
enum { gje3SrchThrd_94 = -1 };
enum { gje3SrchThrd_95 = -1 };
enum { gje3SrchThrd_96 = -1 };
enum { gje3SrchThrd_97 = -1 };
enum { gje3SrchThrd_98 = -1 };
enum { gje3SrchThrd_99 = -1 };
enum { gje3SrchThrd_100 = -1 };
enum { gje3SrchThrd_101 = -1 };
enum { gje3SrchThrd_102 = -1 };
enum { gje3SrchThrd_103 = -1 };
enum { gje3SrchThrd_104 = -1 };
enum { gje3SrchThrd_105 = -1 };
enum { gje3SrchThrd_106 = -1 };
enum { gje3SrchThrd_107 = -1 };
enum { gje3SrchThrd_108 = -1 };
enum { gje3SrchThrd_109 = -1 };
enum { matInv2x2MinBatch = 1200 };
enum { matInv3x3MinBatch = 1100 };
enum { matInv4x4MinBatch = 1100 };
enum { matInv5x5MinBatch = 1200 };
enum { matInv6x6MinBatch = 1200 };
enum { matInv7x7MinBatch = 1500 };
enum { matInv8x8MinBatch = 7700 };
enum { matInv9x9MinBatch = 0x7fffffff };
enum { matInv10x10MinBatch= 0x7fffffff };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 8 };
};
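/*
 * Each config<T,ARCH> specialization below is a compile-time tuning table for
 * the batched Gauss-Jordan based inversion kernels. Judging from the
 * enumerator names and how the values vary with the matrix dimension, the
 * fields appear to mean:
 *
 *   gje3MinDim / gje3MaxDim      - matrix sizes handled by the general kernel
 *   gje3MinBlks / gje3MaxThrds   - occupancy bounds (see the per-class register
 *                                  comments, e.g. "sm_2x, 21 registers per thread")
 *   gje3DimX_NN                  - thread-block x-dimension chosen for an
 *                                  NN x NN matrix; -1 marks unsupported sizes
 *   gje3Pad_NN                   - shared-memory row padding for NN x NN
 *                                  matrices, presumably to reduce bank conflicts
 *   gje3SrchThrd_NN              - threads devoted to the pivot search for an
 *                                  NN x NN matrix; -1 again marks unsupported sizes
 *   matInvKxKMinBatch            - batch size above which the dedicated K x K
 *                                  inversion path is expected to pay off;
 *                                  0x7fffffff effectively disables that path
 *   matInvMinDim / matInvMaxDim  - dimension range covered by those dedicated kernels
 *
 * Because the entries are enums, they are compile-time constants; a dispatcher
 * would typically select them per runtime dimension n, for example (illustrative
 * sketch only):
 *
 *   // int dimX = (n == 2) ? config<T,ARCH>::gje3DimX_02 :
 *   //            (n == 3) ? config<T,ARCH>::gje3DimX_03 : ...;
 *
 * The concrete numbers look like empirically measured per-architecture tuning
 * constants and are reproduced here unchanged.
 */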
template<> class config<float,ARCH_SM20> {
public:
typedef float absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim =109 };
enum { gje3MinBlks = 1 };
enum { gje3MaxThrds = 1536 }; /* sm_2x, 21 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 5 };
enum { gje3DimX_06 = 5 };
enum { gje3DimX_07 = 4 };
enum { gje3DimX_08 = 4 };
enum { gje3DimX_09 = 3 };
enum { gje3DimX_10 = 5 };
enum { gje3DimX_11 = 5 };
enum { gje3DimX_12 = 4 };
enum { gje3DimX_13 = 4 };
enum { gje3DimX_14 = 4 };
enum { gje3DimX_15 = 4 };
enum { gje3DimX_16 = 4 };
enum { gje3DimX_17 = 3 };
enum { gje3DimX_18 = 3 };
enum { gje3DimX_19 = 5 };
enum { gje3DimX_20 = 4 };
enum { gje3DimX_21 = 3 };
enum { gje3DimX_22 = 4 };
enum { gje3DimX_23 = 4 };
enum { gje3DimX_24 = 4 };
enum { gje3DimX_25 = 3 };
enum { gje3DimX_26 = 2 };
enum { gje3DimX_27 = 3 };
enum { gje3DimX_28 = 3 };
enum { gje3DimX_29 = 3 };
enum { gje3DimX_30 = 2 };
enum { gje3DimX_31 = 3 };
enum { gje3DimX_32 = 3 };
enum { gje3DimX_33 = 2 };
enum { gje3DimX_34 = 2 };
enum { gje3DimX_35 = 4 };
enum { gje3DimX_36 = 4 };
enum { gje3DimX_37 = 2 };
enum { gje3DimX_38 = 2 };
enum { gje3DimX_39 = 4 };
enum { gje3DimX_40 = 3 };
enum { gje3DimX_41 = 3 };
enum { gje3DimX_42 = 3 };
enum { gje3DimX_43 = 2 };
enum { gje3DimX_44 = 2 };
enum { gje3DimX_45 = 4 };
enum { gje3DimX_46 = 2 };
enum { gje3DimX_47 = 4 };
enum { gje3DimX_48 = 4 };
enum { gje3DimX_49 = 3 };
enum { gje3DimX_50 = 3 };
enum { gje3DimX_51 = 3 };
enum { gje3DimX_52 = 4 };
enum { gje3DimX_53 = 3 };
enum { gje3DimX_54 = 4 };
enum { gje3DimX_55 = 4 };
enum { gje3DimX_56 = 4 };
enum { gje3DimX_57 = 5 };
enum { gje3DimX_58 = 6 };
enum { gje3DimX_59 = 4 };
enum { gje3DimX_60 = 4 };
enum { gje3DimX_61 = 4 };
enum { gje3DimX_62 = 4 };
enum { gje3DimX_63 = 7 };
enum { gje3DimX_64 = 8 };
enum { gje3DimX_65 = 8 };
enum { gje3DimX_66 = 6 };
enum { gje3DimX_67 = 5 };
enum { gje3DimX_68 = 4 };
enum { gje3DimX_69 = 5 };
enum { gje3DimX_70 = 5 };
enum { gje3DimX_71 = 4 };
enum { gje3DimX_72 = 6 };
enum { gje3DimX_73 = 5 };
enum { gje3DimX_74 = 5 };
enum { gje3DimX_75 = 6 };
enum { gje3DimX_76 = 4 };
enum { gje3DimX_77 = 7 };
enum { gje3DimX_78 = 8 };
enum { gje3DimX_79 = 8 };
enum { gje3DimX_80 = 8 };
enum { gje3DimX_81 = 9 };
enum { gje3DimX_82 = 7 };
enum { gje3DimX_83 = 6 };
enum { gje3DimX_84 = 6 };
enum { gje3DimX_85 = 6 };
enum { gje3DimX_86 = 8 };
enum { gje3DimX_87 = 8 };
enum { gje3DimX_88 = 8 };
enum { gje3DimX_89 = 7 };
enum { gje3DimX_90 = 7 };
enum { gje3DimX_91 = 7 };
enum { gje3DimX_92 = 6 };
enum { gje3DimX_93 = 6 };
enum { gje3DimX_94 = 6 };
enum { gje3DimX_95 = 8 };
enum { gje3DimX_96 = 8 };
enum { gje3DimX_97 = 10 };
enum { gje3DimX_98 = 6 };
enum { gje3DimX_99 = 5 };
enum { gje3DimX_100 = 4 };
enum { gje3DimX_101 = 5 };
enum { gje3DimX_102 = 6 };
enum { gje3DimX_103 = 7 };
enum { gje3DimX_104 = 8 };
enum { gje3DimX_105 = 7 };
enum { gje3DimX_106 = 6 };
enum { gje3DimX_107 = 7 };
enum { gje3DimX_108 = 4 };
enum { gje3DimX_109 = 7 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 1 };
enum { gje3Pad_03 = 1 };
enum { gje3Pad_04 = 1 };
enum { gje3Pad_05 = 1 };
enum { gje3Pad_06 = 1 };
enum { gje3Pad_07 = 2 };
enum { gje3Pad_08 = 4 };
enum { gje3Pad_09 = 1 };
enum { gje3Pad_10 = 2 };
enum { gje3Pad_11 = 1 };
enum { gje3Pad_12 = 0 };
enum { gje3Pad_13 = 1 };
enum { gje3Pad_14 = 4 };
enum { gje3Pad_15 = 5 };
enum { gje3Pad_16 = 4 };
enum { gje3Pad_17 = 1 };
enum { gje3Pad_18 = 1 };
enum { gje3Pad_19 = 1 };
enum { gje3Pad_20 = 3 };
enum { gje3Pad_21 = 1 };
enum { gje3Pad_22 = 5 };
enum { gje3Pad_23 = 5 };
enum { gje3Pad_24 = 4 };
enum { gje3Pad_25 = 2 };
enum { gje3Pad_26 = 4 };
enum { gje3Pad_27 = 2 };
enum { gje3Pad_28 = 1 };
enum { gje3Pad_29 = 1 };
enum { gje3Pad_30 = 4 };
enum { gje3Pad_31 = 4 };
enum { gje3Pad_32 = 3 };
enum { gje3Pad_33 = 1 };
enum { gje3Pad_34 = 4 };
enum { gje3Pad_35 = 1 };
enum { gje3Pad_36 = 1 };
enum { gje3Pad_37 = 1 };
enum { gje3Pad_38 = 4 };
enum { gje3Pad_39 = 5 };
enum { gje3Pad_40 = 1 };
enum { gje3Pad_41 = 1 };
enum { gje3Pad_42 = 4 };
enum { gje3Pad_43 = 0 };
enum { gje3Pad_44 = 0 };
enum { gje3Pad_45 = 1 };
enum { gje3Pad_46 = 4 };
enum { gje3Pad_47 = 5 };
enum { gje3Pad_48 = 4 };
enum { gje3Pad_49 = 1 };
enum { gje3Pad_50 = 4 };
enum { gje3Pad_51 = 3 };
enum { gje3Pad_52 = 3 };
enum { gje3Pad_53 = 1 };
enum { gje3Pad_54 = 0 };
enum { gje3Pad_55 = 5 };
enum { gje3Pad_56 = 4 };
enum { gje3Pad_57 = 2 };
enum { gje3Pad_58 = 1 };
enum { gje3Pad_59 = 1 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 1 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 5 };
enum { gje3Pad_64 = 4 };
enum { gje3Pad_65 = 3 };
enum { gje3Pad_66 = 4 };
enum { gje3Pad_67 = 2 };
enum { gje3Pad_68 = 0 };
enum { gje3Pad_69 = 0 };
enum { gje3Pad_70 = 3 };
enum { gje3Pad_71 = 5 };
enum { gje3Pad_72 = 3 };
enum { gje3Pad_73 = 3 };
enum { gje3Pad_74 = 2 };
enum { gje3Pad_75 = 2 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 5 };
enum { gje3Pad_79 = 5 };
enum { gje3Pad_80 = 4 };
enum { gje3Pad_81 = 4 };
enum { gje3Pad_82 = 1 };
enum { gje3Pad_83 = 0 };
enum { gje3Pad_84 = 3 };
enum { gje3Pad_85 = 2 };
enum { gje3Pad_86 = 2 };
enum { gje3Pad_87 = 1 };
enum { gje3Pad_88 = 1 };
enum { gje3Pad_89 = 1 };
enum { gje3Pad_90 = 1 };
enum { gje3Pad_91 = 1 };
enum { gje3Pad_92 = 1 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 5 };
enum { gje3Pad_95 = 5 };
enum { gje3Pad_96 = 4 };
enum { gje3Pad_97 = 5 };
enum { gje3Pad_98 = 4 };
enum { gje3Pad_99 = 2 };
enum { gje3Pad_100 = 0 };
enum { gje3Pad_101 = 0 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 2 };
enum { gje3Pad_104 = 1 };
enum { gje3Pad_105 = 4 };
enum { gje3Pad_106 = 2 };
enum { gje3Pad_107 = 2 };
enum { gje3Pad_108 = 0 };
enum { gje3Pad_109 = 1 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 1 };
enum { gje3SrchThrd_03 = 2 };
enum { gje3SrchThrd_04 = 2 };
enum { gje3SrchThrd_05 = 2 };
enum { gje3SrchThrd_06 = 2 };
enum { gje3SrchThrd_07 = 2 };
enum { gje3SrchThrd_08 = 3 };
enum { gje3SrchThrd_09 = 3 };
enum { gje3SrchThrd_10 = 3 };
enum { gje3SrchThrd_11 = 3 };
enum { gje3SrchThrd_12 = 3 };
enum { gje3SrchThrd_13 = 3 };
enum { gje3SrchThrd_14 = 3 };
enum { gje3SrchThrd_15 = 3 };
enum { gje3SrchThrd_16 = 3 };
enum { gje3SrchThrd_17 = 3 };
enum { gje3SrchThrd_18 = 3 };
enum { gje3SrchThrd_19 = 3 };
enum { gje3SrchThrd_20 = 3 };
enum { gje3SrchThrd_21 = 3 };
enum { gje3SrchThrd_22 = 3 };
enum { gje3SrchThrd_23 = 3 };
enum { gje3SrchThrd_24 = 3 };
enum { gje3SrchThrd_25 = 3 };
enum { gje3SrchThrd_26 = 3 };
enum { gje3SrchThrd_27 = 3 };
enum { gje3SrchThrd_28 = 3 };
enum { gje3SrchThrd_29 = 3 };
enum { gje3SrchThrd_30 = 3 };
enum { gje3SrchThrd_31 = 3 };
enum { gje3SrchThrd_32 = 3 };
enum { gje3SrchThrd_33 = 3 };
enum { gje3SrchThrd_34 = 3 };
enum { gje3SrchThrd_35 = 3 };
enum { gje3SrchThrd_36 = 3 };
enum { gje3SrchThrd_37 = 3 };
enum { gje3SrchThrd_38 = 3 };
enum { gje3SrchThrd_39 = 3 };
enum { gje3SrchThrd_40 = 4 };
enum { gje3SrchThrd_41 = 4 };
enum { gje3SrchThrd_42 = 4 };
enum { gje3SrchThrd_43 = 4 };
enum { gje3SrchThrd_44 = 4 };
enum { gje3SrchThrd_45 = 4 };
enum { gje3SrchThrd_46 = 4 };
enum { gje3SrchThrd_47 = 4 };
enum { gje3SrchThrd_48 = 4 };
enum { gje3SrchThrd_49 = 4 };
enum { gje3SrchThrd_50 = 4 };
enum { gje3SrchThrd_51 = 4 };
enum { gje3SrchThrd_52 = 4 };
enum { gje3SrchThrd_53 = 4 };
enum { gje3SrchThrd_54 = 4 };
enum { gje3SrchThrd_55 = 4 };
enum { gje3SrchThrd_56 = 4 };
enum { gje3SrchThrd_57 = 4 };
enum { gje3SrchThrd_58 = 4 };
enum { gje3SrchThrd_59 = 4 };
enum { gje3SrchThrd_60 = 4 };
enum { gje3SrchThrd_61 = 4 };
enum { gje3SrchThrd_62 = 4 };
enum { gje3SrchThrd_63 = 4 };
enum { gje3SrchThrd_64 = 4 };
enum { gje3SrchThrd_65 = 4 };
enum { gje3SrchThrd_66 = 5 };
enum { gje3SrchThrd_67 = 5 };
enum { gje3SrchThrd_68 = 5 };
enum { gje3SrchThrd_69 = 5 };
enum { gje3SrchThrd_70 = 5 };
enum { gje3SrchThrd_71 = 5 };
enum { gje3SrchThrd_72 = 5 };
enum { gje3SrchThrd_73 = 5 };
enum { gje3SrchThrd_74 = 5 };
enum { gje3SrchThrd_75 = 5 };
enum { gje3SrchThrd_76 = 5 };
enum { gje3SrchThrd_77 = 5 };
enum { gje3SrchThrd_78 = 5 };
enum { gje3SrchThrd_79 = 5 };
enum { gje3SrchThrd_80 = 5 };
enum { gje3SrchThrd_81 = 5 };
enum { gje3SrchThrd_82 = 5 };
enum { gje3SrchThrd_83 = 5 };
enum { gje3SrchThrd_84 = 6 };
enum { gje3SrchThrd_85 = 6 };
enum { gje3SrchThrd_86 = 6 };
enum { gje3SrchThrd_87 = 6 };
enum { gje3SrchThrd_88 = 6 };
enum { gje3SrchThrd_89 = 6 };
enum { gje3SrchThrd_90 = 6 };
enum { gje3SrchThrd_91 = 6 };
enum { gje3SrchThrd_92 = 6 };
enum { gje3SrchThrd_93 = 6 };
enum { gje3SrchThrd_94 = 6 };
enum { gje3SrchThrd_95 = 6 };
enum { gje3SrchThrd_96 = 6 };
enum { gje3SrchThrd_97 = 6 };
enum { gje3SrchThrd_98 = 6 };
enum { gje3SrchThrd_99 = 6 };
enum { gje3SrchThrd_100 = 6 };
enum { gje3SrchThrd_101 = 6 };
enum { gje3SrchThrd_102 = 6 };
enum { gje3SrchThrd_103 = 6 };
enum { gje3SrchThrd_104 = 6 };
enum { gje3SrchThrd_105 = 6 };
enum { gje3SrchThrd_106 = 6 };
enum { gje3SrchThrd_107 = 6 };
enum { gje3SrchThrd_108 = 6 };
enum { gje3SrchThrd_109 = 6 };
enum { matInv2x2MinBatch = 1700 };
enum { matInv3x3MinBatch = 1400 };
enum { matInv4x4MinBatch = 1400 };
enum { matInv5x5MinBatch = 1300 };
enum { matInv6x6MinBatch = 1400 };
enum { matInv7x7MinBatch = 1200 };
enum { matInv8x8MinBatch = 1200 };
enum { matInv9x9MinBatch = 1200 };
enum { matInv10x10MinBatch= 1300 };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 10 };
};
template<> class config<double,ARCH_SM20> {
public:
typedef double absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim = 77 };
enum { gje3MinBlks = 1 };
enum { gje3MaxThrds = 1408 }; /* sm_2x, 23 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 5 };
enum { gje3DimX_06 = 5 };
enum { gje3DimX_07 = 7 };
enum { gje3DimX_08 = 4 };
enum { gje3DimX_09 = 3 };
enum { gje3DimX_10 = 6 };
enum { gje3DimX_11 = 5 };
enum { gje3DimX_12 = 4 };
enum { gje3DimX_13 = 4 };
enum { gje3DimX_14 = 4 };
enum { gje3DimX_15 = 4 };
enum { gje3DimX_16 = 4 };
enum { gje3DimX_17 = 3 };
enum { gje3DimX_18 = 3 };
enum { gje3DimX_19 = 3 };
enum { gje3DimX_20 = 4 };
enum { gje3DimX_21 = 3 };
enum { gje3DimX_22 = 4 };
enum { gje3DimX_23 = 4 };
enum { gje3DimX_24 = 4 };
enum { gje3DimX_25 = 5 };
enum { gje3DimX_26 = 2 };
enum { gje3DimX_27 = 3 };
enum { gje3DimX_28 = 4 };
enum { gje3DimX_29 = 3 };
enum { gje3DimX_30 = 3 };
enum { gje3DimX_31 = 3 };
enum { gje3DimX_32 = 3 };
enum { gje3DimX_33 = 3 };
enum { gje3DimX_34 = 4 };
enum { gje3DimX_35 = 3 };
enum { gje3DimX_36 = 4 };
enum { gje3DimX_37 = 5 };
enum { gje3DimX_38 = 4 };
enum { gje3DimX_39 = 4 };
enum { gje3DimX_40 = 4 };
enum { gje3DimX_41 = 6 };
enum { gje3DimX_42 = 6 };
enum { gje3DimX_43 = 5 };
enum { gje3DimX_44 = 4 };
enum { gje3DimX_45 = 7 };
enum { gje3DimX_46 = 6 };
enum { gje3DimX_47 = 8 };
enum { gje3DimX_48 = 8 };
enum { gje3DimX_49 = 8 };
enum { gje3DimX_50 = 4 };
enum { gje3DimX_51 = 5 };
enum { gje3DimX_52 = 4 };
enum { gje3DimX_53 = 5 };
enum { gje3DimX_54 = 6 };
enum { gje3DimX_55 = 7 };
enum { gje3DimX_56 = 9 };
enum { gje3DimX_57 = 9 };
enum { gje3DimX_58 = 10 };
enum { gje3DimX_59 = 7 };
enum { gje3DimX_60 = 8 };
enum { gje3DimX_61 = 7 };
enum { gje3DimX_62 = 7 };
enum { gje3DimX_63 = 7 };
enum { gje3DimX_64 = 8 };
enum { gje3DimX_65 = 8 };
enum { gje3DimX_66 = 8 };
enum { gje3DimX_67 = 8 };
enum { gje3DimX_68 = 8 };
enum { gje3DimX_69 = 5 };
enum { gje3DimX_70 = 6 };
enum { gje3DimX_71 = 7 };
enum { gje3DimX_72 = 9 };
enum { gje3DimX_73 = 9 };
enum { gje3DimX_74 = 6 };
enum { gje3DimX_75 = 7 };
enum { gje3DimX_76 = 7 };
enum { gje3DimX_77 = 7 };
enum { gje3DimX_78 = -1 };
enum { gje3DimX_79 = -1 };
enum { gje3DimX_80 = -1 };
enum { gje3DimX_81 = -1 };
enum { gje3DimX_82 = -1 };
enum { gje3DimX_83 = -1 };
enum { gje3DimX_84 = -1 };
enum { gje3DimX_85 = -1 };
enum { gje3DimX_86 = -1 };
enum { gje3DimX_87 = -1 };
enum { gje3DimX_88 = -1 };
enum { gje3DimX_89 = -1 };
enum { gje3DimX_90 = -1 };
enum { gje3DimX_91 = -1 };
enum { gje3DimX_92 = -1 };
enum { gje3DimX_93 = -1 };
enum { gje3DimX_94 = -1 };
enum { gje3DimX_95 = -1 };
enum { gje3DimX_96 = -1 };
enum { gje3DimX_97 = -1 };
enum { gje3DimX_98 = -1 };
enum { gje3DimX_99 = -1 };
enum { gje3DimX_100 = -1 };
enum { gje3DimX_101 = -1 };
enum { gje3DimX_102 = -1 };
enum { gje3DimX_103 = -1 };
enum { gje3DimX_104 = -1 };
enum { gje3DimX_105 = -1 };
enum { gje3DimX_106 = -1 };
enum { gje3DimX_107 = -1 };
enum { gje3DimX_108 = -1 };
enum { gje3DimX_109 = -1 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 0 };
enum { gje3Pad_03 = 0 };
enum { gje3Pad_04 = 0 };
enum { gje3Pad_05 = 0 };
enum { gje3Pad_06 = 0 };
enum { gje3Pad_07 = 0 };
enum { gje3Pad_08 = 4 };
enum { gje3Pad_09 = 4 };
enum { gje3Pad_10 = 0 };
enum { gje3Pad_11 = 0 };
enum { gje3Pad_12 = 0 };
enum { gje3Pad_13 = 0 };
enum { gje3Pad_14 = 0 };
enum { gje3Pad_15 = 4 };
enum { gje3Pad_16 = 4 };
enum { gje3Pad_17 = 2 };
enum { gje3Pad_18 = 1 };
enum { gje3Pad_19 = 0 };
enum { gje3Pad_20 = 0 };
enum { gje3Pad_21 = 0 };
enum { gje3Pad_22 = 0 };
enum { gje3Pad_23 = 0 };
enum { gje3Pad_24 = 4 };
enum { gje3Pad_25 = 0 };
enum { gje3Pad_26 = 0 };
enum { gje3Pad_27 = 0 };
enum { gje3Pad_28 = 0 };
enum { gje3Pad_29 = 1 };
enum { gje3Pad_30 = 0 };
enum { gje3Pad_31 = 4 };
enum { gje3Pad_32 = 3 };
enum { gje3Pad_33 = 2 };
enum { gje3Pad_34 = 2 };
enum { gje3Pad_35 = 0 };
enum { gje3Pad_36 = 0 };
enum { gje3Pad_37 = 0 };
enum { gje3Pad_38 = 0 };
enum { gje3Pad_39 = 0 };
enum { gje3Pad_40 = 4 };
enum { gje3Pad_41 = 2 };
enum { gje3Pad_42 = 0 };
enum { gje3Pad_43 = 0 };
enum { gje3Pad_44 = 0 };
enum { gje3Pad_45 = 0 };
enum { gje3Pad_46 = 0 };
enum { gje3Pad_47 = 0 };
enum { gje3Pad_48 = 1 };
enum { gje3Pad_49 = 0 };
enum { gje3Pad_50 = 2 };
enum { gje3Pad_51 = 2 };
enum { gje3Pad_52 = 0 };
enum { gje3Pad_53 = 0 };
enum { gje3Pad_54 = 0 };
enum { gje3Pad_55 = 0 };
enum { gje3Pad_56 = 1 };
enum { gje3Pad_57 = 0 };
enum { gje3Pad_58 = 0 };
enum { gje3Pad_59 = 0 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 0 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 0 };
enum { gje3Pad_64 = 2 };
enum { gje3Pad_65 = 0 };
enum { gje3Pad_66 = 0 };
enum { gje3Pad_67 = 0 };
enum { gje3Pad_68 = 4 };
enum { gje3Pad_69 = 0 };
enum { gje3Pad_70 = 0 };
enum { gje3Pad_71 = 0 };
enum { gje3Pad_72 = 1 };
enum { gje3Pad_73 = 0 };
enum { gje3Pad_74 = 0 };
enum { gje3Pad_75 = 0 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 0 };
enum { gje3Pad_79 = 0 };
enum { gje3Pad_80 = 0 };
enum { gje3Pad_81 = 0 };
enum { gje3Pad_82 = 0 };
enum { gje3Pad_83 = 0 };
enum { gje3Pad_84 = 0 };
enum { gje3Pad_85 = 0 };
enum { gje3Pad_86 = 0 };
enum { gje3Pad_87 = 0 };
enum { gje3Pad_88 = 0 };
enum { gje3Pad_89 = 0 };
enum { gje3Pad_90 = 0 };
enum { gje3Pad_91 = 0 };
enum { gje3Pad_92 = 0 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 0 };
enum { gje3Pad_95 = 0 };
enum { gje3Pad_96 = 0 };
enum { gje3Pad_97 = 0 };
enum { gje3Pad_98 = 0 };
enum { gje3Pad_99 = 0 };
enum { gje3Pad_100 = 0 };
enum { gje3Pad_101 = 0 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 0 };
enum { gje3Pad_104 = 0 };
enum { gje3Pad_105 = 0 };
enum { gje3Pad_106 = 0 };
enum { gje3Pad_107 = 0 };
enum { gje3Pad_108 = 0 };
enum { gje3Pad_109 = 0 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 1 };
enum { gje3SrchThrd_03 = 2 };
enum { gje3SrchThrd_04 = 2 };
enum { gje3SrchThrd_05 = 2 };
enum { gje3SrchThrd_06 = 2 };
enum { gje3SrchThrd_07 = 2 };
enum { gje3SrchThrd_08 = 2 };
enum { gje3SrchThrd_09 = 2 };
enum { gje3SrchThrd_10 = 2 };
enum { gje3SrchThrd_11 = 2 };
enum { gje3SrchThrd_12 = 3 };
enum { gje3SrchThrd_13 = 3 };
enum { gje3SrchThrd_14 = 3 };
enum { gje3SrchThrd_15 = 3 };
enum { gje3SrchThrd_16 = 3 };
enum { gje3SrchThrd_17 = 3 };
enum { gje3SrchThrd_18 = 3 };
enum { gje3SrchThrd_19 = 3 };
enum { gje3SrchThrd_20 = 3 };
enum { gje3SrchThrd_21 = 3 };
enum { gje3SrchThrd_22 = 3 };
enum { gje3SrchThrd_23 = 3 };
enum { gje3SrchThrd_24 = 3 };
enum { gje3SrchThrd_25 = 3 };
enum { gje3SrchThrd_26 = 3 };
enum { gje3SrchThrd_27 = 3 };
enum { gje3SrchThrd_28 = 3 };
enum { gje3SrchThrd_29 = 4 };
enum { gje3SrchThrd_30 = 4 };
enum { gje3SrchThrd_31 = 4 };
enum { gje3SrchThrd_32 = 4 };
enum { gje3SrchThrd_33 = 4 };
enum { gje3SrchThrd_34 = 4 };
enum { gje3SrchThrd_35 = 4 };
enum { gje3SrchThrd_36 = 4 };
enum { gje3SrchThrd_37 = 4 };
enum { gje3SrchThrd_38 = 4 };
enum { gje3SrchThrd_39 = 4 };
enum { gje3SrchThrd_40 = 4 };
enum { gje3SrchThrd_41 = 4 };
enum { gje3SrchThrd_42 = 4 };
enum { gje3SrchThrd_43 = 4 };
enum { gje3SrchThrd_44 = 4 };
enum { gje3SrchThrd_45 = 4 };
enum { gje3SrchThrd_46 = 4 };
enum { gje3SrchThrd_47 = 4 };
enum { gje3SrchThrd_48 = 4 };
enum { gje3SrchThrd_49 = 4 };
enum { gje3SrchThrd_50 = 4 };
enum { gje3SrchThrd_51 = 4 };
enum { gje3SrchThrd_52 = 4 };
enum { gje3SrchThrd_53 = 4 };
enum { gje3SrchThrd_54 = 5 };
enum { gje3SrchThrd_55 = 6 };
enum { gje3SrchThrd_56 = 6 };
enum { gje3SrchThrd_57 = 6 };
enum { gje3SrchThrd_58 = 6 };
enum { gje3SrchThrd_59 = 6 };
enum { gje3SrchThrd_60 = 6 };
enum { gje3SrchThrd_61 = 6 };
enum { gje3SrchThrd_62 = 6 };
enum { gje3SrchThrd_63 = 6 };
enum { gje3SrchThrd_64 = 6 };
enum { gje3SrchThrd_65 = 6 };
enum { gje3SrchThrd_66 = 6 };
enum { gje3SrchThrd_67 = 6 };
enum { gje3SrchThrd_68 = 6 };
enum { gje3SrchThrd_69 = 6 };
enum { gje3SrchThrd_70 = 6 };
enum { gje3SrchThrd_71 = 6 };
enum { gje3SrchThrd_72 = 6 };
enum { gje3SrchThrd_73 = 6 };
enum { gje3SrchThrd_74 = 6 };
enum { gje3SrchThrd_75 = 6 };
enum { gje3SrchThrd_76 = 6 };
enum { gje3SrchThrd_77 = 6 };
enum { gje3SrchThrd_78 = -1 };
enum { gje3SrchThrd_79 = -1 };
enum { gje3SrchThrd_80 = -1 };
enum { gje3SrchThrd_81 = -1 };
enum { gje3SrchThrd_82 = -1 };
enum { gje3SrchThrd_83 = -1 };
enum { gje3SrchThrd_84 = -1 };
enum { gje3SrchThrd_85 = -1 };
enum { gje3SrchThrd_86 = -1 };
enum { gje3SrchThrd_87 = -1 };
enum { gje3SrchThrd_88 = -1 };
enum { gje3SrchThrd_89 = -1 };
enum { gje3SrchThrd_90 = -1 };
enum { gje3SrchThrd_91 = -1 };
enum { gje3SrchThrd_92 = -1 };
enum { gje3SrchThrd_93 = -1 };
enum { gje3SrchThrd_94 = -1 };
enum { gje3SrchThrd_95 = -1 };
enum { gje3SrchThrd_96 = -1 };
enum { gje3SrchThrd_97 = -1 };
enum { gje3SrchThrd_98 = -1 };
enum { gje3SrchThrd_99 = -1 };
enum { gje3SrchThrd_100 = -1 };
enum { gje3SrchThrd_101 = -1 };
enum { gje3SrchThrd_102 = -1 };
enum { gje3SrchThrd_103 = -1 };
enum { gje3SrchThrd_104 = -1 };
enum { gje3SrchThrd_105 = -1 };
enum { gje3SrchThrd_106 = -1 };
enum { gje3SrchThrd_107 = -1 };
enum { gje3SrchThrd_108 = -1 };
enum { gje3SrchThrd_109 = -1 };
enum { matInv2x2MinBatch = 1800 };
enum { matInv3x3MinBatch = 1400 };
enum { matInv4x4MinBatch = 1300 };
enum { matInv5x5MinBatch = 1200 };
enum { matInv6x6MinBatch = 1200 };
enum { matInv7x7MinBatch = 1200 };
enum { matInv8x8MinBatch = 0x7fffffff };
enum { matInv9x9MinBatch = 0x7fffffff };
enum { matInv10x10MinBatch= 0x7fffffff };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 7 };
};
template<> class config<cuComplex,ARCH_SM20> {
public:
typedef float absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim = 77 };
enum { gje3MinBlks = 1 };
enum { gje3MaxThrds = 1408 }; /* sm_2x, 23 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 5 };
enum { gje3DimX_06 = 5 };
enum { gje3DimX_07 = 4 };
enum { gje3DimX_08 = 4 };
enum { gje3DimX_09 = 3 };
enum { gje3DimX_10 = 6 };
enum { gje3DimX_11 = 5 };
enum { gje3DimX_12 = 4 };
enum { gje3DimX_13 = 5 };
enum { gje3DimX_14 = 4 };
enum { gje3DimX_15 = 4 };
enum { gje3DimX_16 = 4 };
enum { gje3DimX_17 = 3 };
enum { gje3DimX_18 = 3 };
enum { gje3DimX_19 = 3 };
enum { gje3DimX_20 = 4 };
enum { gje3DimX_21 = 3 };
enum { gje3DimX_22 = 4 };
enum { gje3DimX_23 = 4 };
enum { gje3DimX_24 = 4 };
enum { gje3DimX_25 = 5 };
enum { gje3DimX_26 = 2 };
enum { gje3DimX_27 = 3 };
enum { gje3DimX_28 = 4 };
enum { gje3DimX_29 = 3 };
enum { gje3DimX_30 = 3 };
enum { gje3DimX_31 = 3 };
enum { gje3DimX_32 = 3 };
enum { gje3DimX_33 = 3 };
enum { gje3DimX_34 = 4 };
enum { gje3DimX_35 = 3 };
enum { gje3DimX_36 = 4 };
enum { gje3DimX_37 = 5 };
enum { gje3DimX_38 = 4 };
enum { gje3DimX_39 = 5 };
enum { gje3DimX_40 = 4 };
enum { gje3DimX_41 = 6 };
enum { gje3DimX_42 = 6 };
enum { gje3DimX_43 = 5 };
enum { gje3DimX_44 = 4 };
enum { gje3DimX_45 = 5 };
enum { gje3DimX_46 = 6 };
enum { gje3DimX_47 = 8 };
enum { gje3DimX_48 = 8 };
enum { gje3DimX_49 = 7 };
enum { gje3DimX_50 = 4 };
enum { gje3DimX_51 = 5 };
enum { gje3DimX_52 = 4 };
enum { gje3DimX_53 = 5 };
enum { gje3DimX_54 = 6 };
enum { gje3DimX_55 = 7 };
enum { gje3DimX_56 = 9 };
enum { gje3DimX_57 = 9 };
enum { gje3DimX_58 = 10 };
enum { gje3DimX_59 = 7 };
enum { gje3DimX_60 = 8 };
enum { gje3DimX_61 = 7 };
enum { gje3DimX_62 = 7 };
enum { gje3DimX_63 = 7 };
enum { gje3DimX_64 = 8 };
enum { gje3DimX_65 = 8 };
enum { gje3DimX_66 = 8 };
enum { gje3DimX_67 = 8 };
enum { gje3DimX_68 = 7 };
enum { gje3DimX_69 = 7 };
enum { gje3DimX_70 = 6 };
enum { gje3DimX_71 = 7 };
enum { gje3DimX_72 = 9 };
enum { gje3DimX_73 = 7 };
enum { gje3DimX_74 = 6 };
enum { gje3DimX_75 = 7 };
enum { gje3DimX_76 = 4 };
enum { gje3DimX_77 = 7 };
enum { gje3DimX_78 = -1 };
enum { gje3DimX_79 = -1 };
enum { gje3DimX_80 = -1 };
enum { gje3DimX_81 = -1 };
enum { gje3DimX_82 = -1 };
enum { gje3DimX_83 = -1 };
enum { gje3DimX_84 = -1 };
enum { gje3DimX_85 = -1 };
enum { gje3DimX_86 = -1 };
enum { gje3DimX_87 = -1 };
enum { gje3DimX_88 = -1 };
enum { gje3DimX_89 = -1 };
enum { gje3DimX_90 = -1 };
enum { gje3DimX_91 = -1 };
enum { gje3DimX_92 = -1 };
enum { gje3DimX_93 = -1 };
enum { gje3DimX_94 = -1 };
enum { gje3DimX_95 = -1 };
enum { gje3DimX_96 = -1 };
enum { gje3DimX_97 = -1 };
enum { gje3DimX_98 = -1 };
enum { gje3DimX_99 = -1 };
enum { gje3DimX_100 = -1 };
enum { gje3DimX_101 = -1 };
enum { gje3DimX_102 = -1 };
enum { gje3DimX_103 = -1 };
enum { gje3DimX_104 = -1 };
enum { gje3DimX_105 = -1 };
enum { gje3DimX_106 = -1 };
enum { gje3DimX_107 = -1 };
enum { gje3DimX_108 = -1 };
enum { gje3DimX_109 = -1 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 0 };
enum { gje3Pad_03 = 0 };
enum { gje3Pad_04 = 0 };
enum { gje3Pad_05 = 0 };
enum { gje3Pad_06 = 0 };
enum { gje3Pad_07 = 5 };
enum { gje3Pad_08 = 4 };
enum { gje3Pad_09 = 0 };
enum { gje3Pad_10 = 0 };
enum { gje3Pad_11 = 0 };
enum { gje3Pad_12 = 0 };
enum { gje3Pad_13 = 0 };
enum { gje3Pad_14 = 5 };
enum { gje3Pad_15 = 5 };
enum { gje3Pad_16 = 4 };
enum { gje3Pad_17 = 2 };
enum { gje3Pad_18 = 1 };
enum { gje3Pad_19 = 0 };
enum { gje3Pad_20 = 0 };
enum { gje3Pad_21 = 0 };
enum { gje3Pad_22 = 4 };
enum { gje3Pad_23 = 5 };
enum { gje3Pad_24 = 4 };
enum { gje3Pad_25 = 0 };
enum { gje3Pad_26 = 0 };
enum { gje3Pad_27 = 0 };
enum { gje3Pad_28 = 0 };
enum { gje3Pad_29 = 1 };
enum { gje3Pad_30 = 0 };
enum { gje3Pad_31 = 4 };
enum { gje3Pad_32 = 3 };
enum { gje3Pad_33 = 2 };
enum { gje3Pad_34 = 2 };
enum { gje3Pad_35 = 0 };
enum { gje3Pad_36 = 0 };
enum { gje3Pad_37 = 0 };
enum { gje3Pad_38 = 0 };
enum { gje3Pad_39 = 3 };
enum { gje3Pad_40 = 4 };
enum { gje3Pad_41 = 2 };
enum { gje3Pad_42 = 1 };
enum { gje3Pad_43 = 1 };
enum { gje3Pad_44 = 0 };
enum { gje3Pad_45 = 0 };
enum { gje3Pad_46 = 0 };
enum { gje3Pad_47 = 0 };
enum { gje3Pad_48 = 3 };
enum { gje3Pad_49 = 5 };
enum { gje3Pad_50 = 2 };
enum { gje3Pad_51 = 2 };
enum { gje3Pad_52 = 0 };
enum { gje3Pad_53 = 0 };
enum { gje3Pad_54 = 0 };
enum { gje3Pad_55 = 0 };
enum { gje3Pad_56 = 1 };
enum { gje3Pad_57 = 0 };
enum { gje3Pad_58 = 0 };
enum { gje3Pad_59 = 0 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 0 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 0 };
enum { gje3Pad_64 = 1 };
enum { gje3Pad_65 = 0 };
enum { gje3Pad_66 = 0 };
enum { gje3Pad_67 = 5 };
enum { gje3Pad_68 = 3 };
enum { gje3Pad_69 = 2 };
enum { gje3Pad_70 = 0 };
enum { gje3Pad_71 = 0 };
enum { gje3Pad_72 = 1 };
enum { gje3Pad_73 = 2 };
enum { gje3Pad_74 = 0 };
enum { gje3Pad_75 = 0 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 0 };
enum { gje3Pad_79 = 0 };
enum { gje3Pad_80 = 0 };
enum { gje3Pad_81 = 0 };
enum { gje3Pad_82 = 0 };
enum { gje3Pad_83 = 0 };
enum { gje3Pad_84 = 0 };
enum { gje3Pad_85 = 0 };
enum { gje3Pad_86 = 0 };
enum { gje3Pad_87 = 0 };
enum { gje3Pad_88 = 0 };
enum { gje3Pad_89 = 0 };
enum { gje3Pad_90 = 0 };
enum { gje3Pad_91 = 0 };
enum { gje3Pad_92 = 0 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 0 };
enum { gje3Pad_95 = 0 };
enum { gje3Pad_96 = 0 };
enum { gje3Pad_97 = 0 };
enum { gje3Pad_98 = 0 };
enum { gje3Pad_99 = 0 };
enum { gje3Pad_100 = 0 };
enum { gje3Pad_101 = 0 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 0 };
enum { gje3Pad_104 = 0 };
enum { gje3Pad_105 = 0 };
enum { gje3Pad_106 = 0 };
enum { gje3Pad_107 = 0 };
enum { gje3Pad_108 = 0 };
enum { gje3Pad_109 = 0 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 1 };
enum { gje3SrchThrd_03 = 2 };
enum { gje3SrchThrd_04 = 2 };
enum { gje3SrchThrd_05 = 3 };
enum { gje3SrchThrd_06 = 3 };
enum { gje3SrchThrd_07 = 3 };
enum { gje3SrchThrd_08 = 3 };
enum { gje3SrchThrd_09 = 3 };
enum { gje3SrchThrd_10 = 3 };
enum { gje3SrchThrd_11 = 3 };
enum { gje3SrchThrd_12 = 3 };
enum { gje3SrchThrd_13 = 3 };
enum { gje3SrchThrd_14 = 3 };
enum { gje3SrchThrd_15 = 3 };
enum { gje3SrchThrd_16 = 3 };
enum { gje3SrchThrd_17 = 3 };
enum { gje3SrchThrd_18 = 3 };
enum { gje3SrchThrd_19 = 3 };
enum { gje3SrchThrd_20 = 3 };
enum { gje3SrchThrd_21 = 3 };
enum { gje3SrchThrd_22 = 3 };
enum { gje3SrchThrd_23 = 3 };
enum { gje3SrchThrd_24 = 3 };
enum { gje3SrchThrd_25 = 4 };
enum { gje3SrchThrd_26 = 4 };
enum { gje3SrchThrd_27 = 4 };
enum { gje3SrchThrd_28 = 4 };
enum { gje3SrchThrd_29 = 4 };
enum { gje3SrchThrd_30 = 4 };
enum { gje3SrchThrd_31 = 4 };
enum { gje3SrchThrd_32 = 4 };
enum { gje3SrchThrd_33 = 4 };
enum { gje3SrchThrd_34 = 4 };
enum { gje3SrchThrd_35 = 5 };
enum { gje3SrchThrd_36 = 5 };
enum { gje3SrchThrd_37 = 5 };
enum { gje3SrchThrd_38 = 5 };
enum { gje3SrchThrd_39 = 5 };
enum { gje3SrchThrd_40 = 5 };
enum { gje3SrchThrd_41 = 5 };
enum { gje3SrchThrd_42 = 5 };
enum { gje3SrchThrd_43 = 5 };
enum { gje3SrchThrd_44 = 5 };
enum { gje3SrchThrd_45 = 5 };
enum { gje3SrchThrd_46 = 5 };
enum { gje3SrchThrd_47 = 5 };
enum { gje3SrchThrd_48 = 5 };
enum { gje3SrchThrd_49 = 5 };
enum { gje3SrchThrd_50 = 6 };
enum { gje3SrchThrd_51 = 6 };
enum { gje3SrchThrd_52 = 6 };
enum { gje3SrchThrd_53 = 6 };
enum { gje3SrchThrd_54 = 6 };
enum { gje3SrchThrd_55 = 6 };
enum { gje3SrchThrd_56 = 6 };
enum { gje3SrchThrd_57 = 6 };
enum { gje3SrchThrd_58 = 6 };
enum { gje3SrchThrd_59 = 7 };
enum { gje3SrchThrd_60 = 7 };
enum { gje3SrchThrd_61 = 7 };
enum { gje3SrchThrd_62 = 7 };
enum { gje3SrchThrd_63 = 7 };
enum { gje3SrchThrd_64 = 7 };
enum { gje3SrchThrd_65 = 7 };
enum { gje3SrchThrd_66 = 7 };
enum { gje3SrchThrd_67 = 7 };
enum { gje3SrchThrd_68 = 7 };
enum { gje3SrchThrd_69 = 7 };
enum { gje3SrchThrd_70 = 7 };
enum { gje3SrchThrd_71 = 7 };
enum { gje3SrchThrd_72 = 7 };
enum { gje3SrchThrd_73 = 7 };
enum { gje3SrchThrd_74 = 7 };
enum { gje3SrchThrd_75 = 7 };
enum { gje3SrchThrd_76 = 7 };
enum { gje3SrchThrd_77 = 8 };
enum { gje3SrchThrd_78 = -1 };
enum { gje3SrchThrd_79 = -1 };
enum { gje3SrchThrd_80 = -1 };
enum { gje3SrchThrd_81 = -1 };
enum { gje3SrchThrd_82 = -1 };
enum { gje3SrchThrd_83 = -1 };
enum { gje3SrchThrd_84 = -1 };
enum { gje3SrchThrd_85 = -1 };
enum { gje3SrchThrd_86 = -1 };
enum { gje3SrchThrd_87 = -1 };
enum { gje3SrchThrd_88 = -1 };
enum { gje3SrchThrd_89 = -1 };
enum { gje3SrchThrd_90 = -1 };
enum { gje3SrchThrd_91 = -1 };
enum { gje3SrchThrd_92 = -1 };
enum { gje3SrchThrd_93 = -1 };
enum { gje3SrchThrd_94 = -1 };
enum { gje3SrchThrd_95 = -1 };
enum { gje3SrchThrd_96 = -1 };
enum { gje3SrchThrd_97 = -1 };
enum { gje3SrchThrd_98 = -1 };
enum { gje3SrchThrd_99 = -1 };
enum { gje3SrchThrd_100 = -1 };
enum { gje3SrchThrd_101 = -1 };
enum { gje3SrchThrd_102 = -1 };
enum { gje3SrchThrd_103 = -1 };
enum { gje3SrchThrd_104 = -1 };
enum { gje3SrchThrd_105 = -1 };
enum { gje3SrchThrd_106 = -1 };
enum { gje3SrchThrd_107 = -1 };
enum { gje3SrchThrd_108 = -1 };
enum { gje3SrchThrd_109 = -1 };
enum { matInv2x2MinBatch = 1700 };
enum { matInv3x3MinBatch = 1300 };
enum { matInv4x4MinBatch = 1200 };
enum { matInv5x5MinBatch = 1200 };
enum { matInv6x6MinBatch = 1000 };
enum { matInv7x7MinBatch = 1100 };
enum { matInv8x8MinBatch = 1650 };
enum { matInv9x9MinBatch = 0x7fffffff };
enum { matInv10x10MinBatch= 0x7fffffff };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 8 };
};
template<> class config<cuDoubleComplex,ARCH_SM20> {
public:
typedef double absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim = 55 };
enum { gje3MinBlks = 1 };
enum { gje3MaxThrds = 1152 }; /* sm_2x, 28 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 5 };
enum { gje3DimX_06 = 5 };
enum { gje3DimX_07 = 4 };
enum { gje3DimX_08 = 8 };
enum { gje3DimX_09 = 3 };
enum { gje3DimX_10 = 6 };
enum { gje3DimX_11 = 6 };
enum { gje3DimX_12 = 4 };
enum { gje3DimX_13 = 5 };
enum { gje3DimX_14 = 4 };
enum { gje3DimX_15 = 2 };
enum { gje3DimX_16 = 4 };
enum { gje3DimX_17 = 3 };
enum { gje3DimX_18 = 4 };
enum { gje3DimX_19 = 3 };
enum { gje3DimX_20 = 4 };
enum { gje3DimX_21 = 3 };
enum { gje3DimX_22 = 4 };
enum { gje3DimX_23 = 4 };
enum { gje3DimX_24 = 8 };
enum { gje3DimX_25 = 5 };
enum { gje3DimX_26 = 4 };
enum { gje3DimX_27 = 3 };
enum { gje3DimX_28 = 8 };
enum { gje3DimX_29 = 5 };
enum { gje3DimX_30 = 6 };
enum { gje3DimX_31 = 7 };
enum { gje3DimX_32 = 8 };
enum { gje3DimX_33 = 8 };
enum { gje3DimX_34 = 8 };
enum { gje3DimX_35 = 8 };
enum { gje3DimX_36 = 8 };
enum { gje3DimX_37 = 5 };
enum { gje3DimX_38 = 6 };
enum { gje3DimX_39 = 8 };
enum { gje3DimX_40 = 8 };
enum { gje3DimX_41 = 8 };
enum { gje3DimX_42 = 8 };
enum { gje3DimX_43 = 8 };
enum { gje3DimX_44 = 8 };
enum { gje3DimX_45 = 8 };
enum { gje3DimX_46 = 8 };
enum { gje3DimX_47 = 8 };
enum { gje3DimX_48 = 8 };
enum { gje3DimX_49 = 8 };
enum { gje3DimX_50 = 8 };
enum { gje3DimX_51 = 8 };
enum { gje3DimX_52 = 8 };
enum { gje3DimX_53 = 8 };
enum { gje3DimX_54 = 6 };
enum { gje3DimX_55 = 8 };
enum { gje3DimX_56 = -1 };
enum { gje3DimX_57 = -1 };
enum { gje3DimX_58 = -1 };
enum { gje3DimX_59 = -1 };
enum { gje3DimX_60 = -1 };
enum { gje3DimX_61 = -1 };
enum { gje3DimX_62 = -1 };
enum { gje3DimX_63 = -1 };
enum { gje3DimX_64 = -1 };
enum { gje3DimX_65 = -1 };
enum { gje3DimX_66 = -1 };
enum { gje3DimX_67 = -1 };
enum { gje3DimX_68 = -1 };
enum { gje3DimX_69 = -1 };
enum { gje3DimX_70 = -1 };
enum { gje3DimX_71 = -1 };
enum { gje3DimX_72 = -1 };
enum { gje3DimX_73 = -1 };
enum { gje3DimX_74 = -1 };
enum { gje3DimX_75 = -1 };
enum { gje3DimX_76 = -1 };
enum { gje3DimX_77 = -1 };
enum { gje3DimX_78 = -1 };
enum { gje3DimX_79 = -1 };
enum { gje3DimX_80 = -1 };
enum { gje3DimX_81 = -1 };
enum { gje3DimX_82 = -1 };
enum { gje3DimX_83 = -1 };
enum { gje3DimX_84 = -1 };
enum { gje3DimX_85 = -1 };
enum { gje3DimX_86 = -1 };
enum { gje3DimX_87 = -1 };
enum { gje3DimX_88 = -1 };
enum { gje3DimX_89 = -1 };
enum { gje3DimX_90 = -1 };
enum { gje3DimX_91 = -1 };
enum { gje3DimX_92 = -1 };
enum { gje3DimX_93 = -1 };
enum { gje3DimX_94 = -1 };
enum { gje3DimX_95 = -1 };
enum { gje3DimX_96 = -1 };
enum { gje3DimX_97 = -1 };
enum { gje3DimX_98 = -1 };
enum { gje3DimX_99 = -1 };
enum { gje3DimX_100 = -1 };
enum { gje3DimX_101 = -1 };
enum { gje3DimX_102 = -1 };
enum { gje3DimX_103 = -1 };
enum { gje3DimX_104 = -1 };
enum { gje3DimX_105 = -1 };
enum { gje3DimX_106 = -1 };
enum { gje3DimX_107 = -1 };
enum { gje3DimX_108 = -1 };
enum { gje3DimX_109 = -1 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 0 };
enum { gje3Pad_03 = 0 };
enum { gje3Pad_04 = 0 };
enum { gje3Pad_05 = 0 };
enum { gje3Pad_06 = 0 };
enum { gje3Pad_07 = 4 };
enum { gje3Pad_08 = 2 };
enum { gje3Pad_09 = 2 };
enum { gje3Pad_10 = 4 };
enum { gje3Pad_11 = 3 };
enum { gje3Pad_12 = 2 };
enum { gje3Pad_13 = 0 };
enum { gje3Pad_14 = 0 };
enum { gje3Pad_15 = 0 };
enum { gje3Pad_16 = 2 };
enum { gje3Pad_17 = 2 };
enum { gje3Pad_18 = 0 };
enum { gje3Pad_19 = 0 };
enum { gje3Pad_20 = 0 };
enum { gje3Pad_21 = 0 };
enum { gje3Pad_22 = 0 };
enum { gje3Pad_23 = 0 };
enum { gje3Pad_24 = 1 };
enum { gje3Pad_25 = 4 };
enum { gje3Pad_26 = 0 };
enum { gje3Pad_27 = 0 };
enum { gje3Pad_28 = 0 };
enum { gje3Pad_29 = 0 };
enum { gje3Pad_30 = 0 };
enum { gje3Pad_31 = 0 };
enum { gje3Pad_32 = 1 };
enum { gje3Pad_33 = 0 };
enum { gje3Pad_34 = 0 };
enum { gje3Pad_35 = 0 };
enum { gje3Pad_36 = 0 };
enum { gje3Pad_37 = 0 };
enum { gje3Pad_38 = 0 };
enum { gje3Pad_39 = 0 };
enum { gje3Pad_40 = 1 };
enum { gje3Pad_41 = 0 };
enum { gje3Pad_42 = 0 };
enum { gje3Pad_43 = 0 };
enum { gje3Pad_44 = 0 };
enum { gje3Pad_45 = 0 };
enum { gje3Pad_46 = 0 };
enum { gje3Pad_47 = 0 };
enum { gje3Pad_48 = 1 };
enum { gje3Pad_49 = 0 };
enum { gje3Pad_50 = 0 };
enum { gje3Pad_51 = 0 };
enum { gje3Pad_52 = 0 };
enum { gje3Pad_53 = 0 };
enum { gje3Pad_54 = 0 };
enum { gje3Pad_55 = 0 };
enum { gje3Pad_56 = 0 };
enum { gje3Pad_57 = 0 };
enum { gje3Pad_58 = 0 };
enum { gje3Pad_59 = 0 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 0 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 0 };
enum { gje3Pad_64 = 0 };
enum { gje3Pad_65 = 0 };
enum { gje3Pad_66 = 0 };
enum { gje3Pad_67 = 0 };
enum { gje3Pad_68 = 0 };
enum { gje3Pad_69 = 0 };
enum { gje3Pad_70 = 0 };
enum { gje3Pad_71 = 0 };
enum { gje3Pad_72 = 0 };
enum { gje3Pad_73 = 0 };
enum { gje3Pad_74 = 0 };
enum { gje3Pad_75 = 0 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 0 };
enum { gje3Pad_79 = 0 };
enum { gje3Pad_80 = 0 };
enum { gje3Pad_81 = 0 };
enum { gje3Pad_82 = 0 };
enum { gje3Pad_83 = 0 };
enum { gje3Pad_84 = 0 };
enum { gje3Pad_85 = 0 };
enum { gje3Pad_86 = 0 };
enum { gje3Pad_87 = 0 };
enum { gje3Pad_88 = 0 };
enum { gje3Pad_89 = 0 };
enum { gje3Pad_90 = 0 };
enum { gje3Pad_91 = 0 };
enum { gje3Pad_92 = 0 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 0 };
enum { gje3Pad_95 = 0 };
enum { gje3Pad_96 = 0 };
enum { gje3Pad_97 = 0 };
enum { gje3Pad_98 = 0 };
enum { gje3Pad_99 = 0 };
enum { gje3Pad_100 = 0 };
enum { gje3Pad_101 = 0 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 0 };
enum { gje3Pad_104 = 0 };
enum { gje3Pad_105 = 0 };
enum { gje3Pad_106 = 0 };
enum { gje3Pad_107 = 0 };
enum { gje3Pad_108 = 0 };
enum { gje3Pad_109 = 0 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 1 };
enum { gje3SrchThrd_03 = 2 };
enum { gje3SrchThrd_04 = 2 };
enum { gje3SrchThrd_05 = 2 };
enum { gje3SrchThrd_06 = 2 };
enum { gje3SrchThrd_07 = 2 };
enum { gje3SrchThrd_08 = 3 };
enum { gje3SrchThrd_09 = 3 };
enum { gje3SrchThrd_10 = 3 };
enum { gje3SrchThrd_11 = 3 };
enum { gje3SrchThrd_12 = 3 };
enum { gje3SrchThrd_13 = 3 };
enum { gje3SrchThrd_14 = 3 };
enum { gje3SrchThrd_15 = 3 };
enum { gje3SrchThrd_16 = 3 };
enum { gje3SrchThrd_17 = 3 };
enum { gje3SrchThrd_18 = 3 };
enum { gje3SrchThrd_19 = 3 };
enum { gje3SrchThrd_20 = 3 };
enum { gje3SrchThrd_21 = 4 };
enum { gje3SrchThrd_22 = 4 };
enum { gje3SrchThrd_23 = 4 };
enum { gje3SrchThrd_24 = 4 };
enum { gje3SrchThrd_25 = 4 };
enum { gje3SrchThrd_26 = 4 };
enum { gje3SrchThrd_27 = 4 };
enum { gje3SrchThrd_28 = 4 };
enum { gje3SrchThrd_29 = 4 };
enum { gje3SrchThrd_30 = 4 };
enum { gje3SrchThrd_31 = 4 };
enum { gje3SrchThrd_32 = 4 };
enum { gje3SrchThrd_33 = 4 };
enum { gje3SrchThrd_34 = 4 };
enum { gje3SrchThrd_35 = 4 };
enum { gje3SrchThrd_36 = 4 };
enum { gje3SrchThrd_37 = 6 };
enum { gje3SrchThrd_38 = 6 };
enum { gje3SrchThrd_39 = 6 };
enum { gje3SrchThrd_40 = 6 };
enum { gje3SrchThrd_41 = 6 };
enum { gje3SrchThrd_42 = 6 };
enum { gje3SrchThrd_43 = 6 };
enum { gje3SrchThrd_44 = 6 };
enum { gje3SrchThrd_45 = 6 };
enum { gje3SrchThrd_46 = 7 };
enum { gje3SrchThrd_47 = 7 };
enum { gje3SrchThrd_48 = 7 };
enum { gje3SrchThrd_49 = 7 };
enum { gje3SrchThrd_50 = 7 };
enum { gje3SrchThrd_51 = 7 };
enum { gje3SrchThrd_52 = 7 };
enum { gje3SrchThrd_53 = 7 };
enum { gje3SrchThrd_54 = 7 };
enum { gje3SrchThrd_55 = 7 };
enum { gje3SrchThrd_56 = -1 };
enum { gje3SrchThrd_57 = -1 };
enum { gje3SrchThrd_58 = -1 };
enum { gje3SrchThrd_59 = -1 };
enum { gje3SrchThrd_60 = -1 };
enum { gje3SrchThrd_61 = -1 };
enum { gje3SrchThrd_62 = -1 };
enum { gje3SrchThrd_63 = -1 };
enum { gje3SrchThrd_64 = -1 };
enum { gje3SrchThrd_65 = -1 };
enum { gje3SrchThrd_66 = -1 };
enum { gje3SrchThrd_67 = -1 };
enum { gje3SrchThrd_68 = -1 };
enum { gje3SrchThrd_69 = -1 };
enum { gje3SrchThrd_70 = -1 };
enum { gje3SrchThrd_71 = -1 };
enum { gje3SrchThrd_72 = -1 };
enum { gje3SrchThrd_73 = -1 };
enum { gje3SrchThrd_74 = -1 };
enum { gje3SrchThrd_75 = -1 };
enum { gje3SrchThrd_76 = -1 };
enum { gje3SrchThrd_77 = -1 };
enum { gje3SrchThrd_78 = -1 };
enum { gje3SrchThrd_79 = -1 };
enum { gje3SrchThrd_80 = -1 };
enum { gje3SrchThrd_81 = -1 };
enum { gje3SrchThrd_82 = -1 };
enum { gje3SrchThrd_83 = -1 };
enum { gje3SrchThrd_84 = -1 };
enum { gje3SrchThrd_85 = -1 };
enum { gje3SrchThrd_86 = -1 };
enum { gje3SrchThrd_87 = -1 };
enum { gje3SrchThrd_88 = -1 };
enum { gje3SrchThrd_89 = -1 };
enum { gje3SrchThrd_90 = -1 };
enum { gje3SrchThrd_91 = -1 };
enum { gje3SrchThrd_92 = -1 };
enum { gje3SrchThrd_93 = -1 };
enum { gje3SrchThrd_94 = -1 };
enum { gje3SrchThrd_95 = -1 };
enum { gje3SrchThrd_96 = -1 };
enum { gje3SrchThrd_97 = -1 };
enum { gje3SrchThrd_98 = -1 };
enum { gje3SrchThrd_99 = -1 };
enum { gje3SrchThrd_100 = -1 };
enum { gje3SrchThrd_101 = -1 };
enum { gje3SrchThrd_102 = -1 };
enum { gje3SrchThrd_103 = -1 };
enum { gje3SrchThrd_104 = -1 };
enum { gje3SrchThrd_105 = -1 };
enum { gje3SrchThrd_106 = -1 };
enum { gje3SrchThrd_107 = -1 };
enum { gje3SrchThrd_108 = -1 };
enum { gje3SrchThrd_109 = -1 };
enum { matInv2x2MinBatch = 1600 };
enum { matInv3x3MinBatch = 1300 };
enum { matInv4x4MinBatch = 1100 };
enum { matInv5x5MinBatch = 1600 };
enum { matInv6x6MinBatch = 0x7fffffff };
enum { matInv7x7MinBatch = 0x7fffffff };
enum { matInv8x8MinBatch = 0x7fffffff };
enum { matInv9x9MinBatch = 0x7fffffff };
enum { matInv10x10MinBatch= 0x7fffffff };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 5 };
};
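/*
 * The remaining specializations cover the older sm_13 architecture. With the
 * tighter thread and register budgets noted in the per-class comments (e.g.
 * 1024 threads at 16 registers per thread for float), the supported gje3MaxDim
 * values and the matInv*MinBatch thresholds are noticeably more conservative
 * than in the sm_2x tables above.
 */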
template<> class config<float,ARCH_SM13> {
public:
typedef float absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim = 62 };
enum { gje3MinBlks = 1 };
enum { gje3MaxThrds = 1024 }; /* sm_13, 16 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 5 };
enum { gje3DimX_06 = 2 };
enum { gje3DimX_07 = 4 };
enum { gje3DimX_08 = 4 };
enum { gje3DimX_09 = 3 };
enum { gje3DimX_10 = 3 };
enum { gje3DimX_11 = 4 };
enum { gje3DimX_12 = 4 };
enum { gje3DimX_13 = 2 };
enum { gje3DimX_14 = 2 };
enum { gje3DimX_15 = 2 };
enum { gje3DimX_16 = 2 };
enum { gje3DimX_17 = 2 };
enum { gje3DimX_18 = 2 };
enum { gje3DimX_19 = 3 };
enum { gje3DimX_20 = 2 };
enum { gje3DimX_21 = 3 };
enum { gje3DimX_22 = 2 };
enum { gje3DimX_23 = 2 };
enum { gje3DimX_24 = 2 };
enum { gje3DimX_25 = 3 };
enum { gje3DimX_26 = 2 };
enum { gje3DimX_27 = 3 };
enum { gje3DimX_28 = 4 };
enum { gje3DimX_29 = 2 };
enum { gje3DimX_30 = 2 };
enum { gje3DimX_31 = 3 };
enum { gje3DimX_32 = 2 };
enum { gje3DimX_33 = 3 };
enum { gje3DimX_34 = 2 };
enum { gje3DimX_35 = 5 };
enum { gje3DimX_36 = 4 };
enum { gje3DimX_37 = 5 };
enum { gje3DimX_38 = 5 };
enum { gje3DimX_39 = 3 };
enum { gje3DimX_40 = 4 };
enum { gje3DimX_41 = 3 };
enum { gje3DimX_42 = 3 };
enum { gje3DimX_43 = 5 };
enum { gje3DimX_44 = 4 };
enum { gje3DimX_45 = 7 };
enum { gje3DimX_46 = 8 };
enum { gje3DimX_47 = 8 };
enum { gje3DimX_48 = 8 };
enum { gje3DimX_49 = 7 };
enum { gje3DimX_50 = 8 };
enum { gje3DimX_51 = 5 };
enum { gje3DimX_52 = 8 };
enum { gje3DimX_53 = 5 };
enum { gje3DimX_54 = 6 };
enum { gje3DimX_55 = 7 };
enum { gje3DimX_56 = 8 };
enum { gje3DimX_57 = 5 };
enum { gje3DimX_58 = 6 };
enum { gje3DimX_59 = 7 };
enum { gje3DimX_60 = 4 };
enum { gje3DimX_61 = 7 };
enum { gje3DimX_62 = 8 };
enum { gje3DimX_63 = -1 };
enum { gje3DimX_64 = -1 };
enum { gje3DimX_65 = -1 };
enum { gje3DimX_66 = -1 };
enum { gje3DimX_67 = -1 };
enum { gje3DimX_68 = -1 };
enum { gje3DimX_69 = -1 };
enum { gje3DimX_70 = -1 };
enum { gje3DimX_71 = -1 };
enum { gje3DimX_72 = -1 };
enum { gje3DimX_73 = -1 };
enum { gje3DimX_74 = -1 };
enum { gje3DimX_75 = -1 };
enum { gje3DimX_76 = -1 };
enum { gje3DimX_77 = -1 };
enum { gje3DimX_78 = -1 };
enum { gje3DimX_79 = -1 };
enum { gje3DimX_80 = -1 };
enum { gje3DimX_81 = -1 };
enum { gje3DimX_82 = -1 };
enum { gje3DimX_83 = -1 };
enum { gje3DimX_84 = -1 };
enum { gje3DimX_85 = -1 };
enum { gje3DimX_86 = -1 };
enum { gje3DimX_87 = -1 };
enum { gje3DimX_88 = -1 };
enum { gje3DimX_89 = -1 };
enum { gje3DimX_90 = -1 };
enum { gje3DimX_91 = -1 };
enum { gje3DimX_92 = -1 };
enum { gje3DimX_93 = -1 };
enum { gje3DimX_94 = -1 };
enum { gje3DimX_95 = -1 };
enum { gje3DimX_96 = -1 };
enum { gje3DimX_97 = -1 };
enum { gje3DimX_98 = -1 };
enum { gje3DimX_99 = -1 };
enum { gje3DimX_100 = -1 };
enum { gje3DimX_101 = -1 };
enum { gje3DimX_102 = -1 };
enum { gje3DimX_103 = -1 };
enum { gje3DimX_104 = -1 };
enum { gje3DimX_105 = -1 };
enum { gje3DimX_106 = -1 };
enum { gje3DimX_107 = -1 };
enum { gje3DimX_108 = -1 };
enum { gje3DimX_109 = -1 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 0 };
enum { gje3Pad_03 = 0 };
enum { gje3Pad_04 = 0 };
enum { gje3Pad_05 = 0 };
enum { gje3Pad_06 = 1 };
enum { gje3Pad_07 = 4 };
enum { gje3Pad_08 = 4 };
enum { gje3Pad_09 = 4 };
enum { gje3Pad_10 = 0 };
enum { gje3Pad_11 = 1 };
enum { gje3Pad_12 = 0 };
enum { gje3Pad_13 = 1 };
enum { gje3Pad_14 = 4 };
enum { gje3Pad_15 = 3 };
enum { gje3Pad_16 = 2 };
enum { gje3Pad_17 = 1 };
enum { gje3Pad_18 = 0 };
enum { gje3Pad_19 = 0 };
enum { gje3Pad_20 = 2 };
enum { gje3Pad_21 = 0 };
enum { gje3Pad_22 = 0 };
enum { gje3Pad_23 = 2 };
enum { gje3Pad_24 = 2 };
enum { gje3Pad_25 = 1 };
enum { gje3Pad_26 = 0 };
enum { gje3Pad_27 = 0 };
enum { gje3Pad_28 = 0 };
enum { gje3Pad_29 = 1 };
enum { gje3Pad_30 = 0 };
enum { gje3Pad_31 = 4 };
enum { gje3Pad_32 = 2 };
enum { gje3Pad_33 = 2 };
enum { gje3Pad_34 = 0 };
enum { gje3Pad_35 = 2 };
enum { gje3Pad_36 = 0 };
enum { gje3Pad_37 = 0 };
enum { gje3Pad_38 = 0 };
enum { gje3Pad_39 = 3 };
enum { gje3Pad_40 = 4 };
enum { gje3Pad_41 = 1 };
enum { gje3Pad_42 = 3 };
enum { gje3Pad_43 = 1 };
enum { gje3Pad_44 = 0 };
enum { gje3Pad_45 = 0 };
enum { gje3Pad_46 = 0 };
enum { gje3Pad_47 = 0 };
enum { gje3Pad_48 = 3 };
enum { gje3Pad_49 = 3 };
enum { gje3Pad_50 = 0 };
enum { gje3Pad_51 = 2 };
enum { gje3Pad_52 = 4 };
enum { gje3Pad_53 = 0 };
enum { gje3Pad_54 = 0 };
enum { gje3Pad_55 = 0 };
enum { gje3Pad_56 = 1 };
enum { gje3Pad_57 = 3 };
enum { gje3Pad_58 = 0 };
enum { gje3Pad_59 = 0 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 0 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 0 };
enum { gje3Pad_64 = 0 };
enum { gje3Pad_65 = 0 };
enum { gje3Pad_66 = 0 };
enum { gje3Pad_67 = 0 };
enum { gje3Pad_68 = 0 };
enum { gje3Pad_69 = 0 };
enum { gje3Pad_70 = 0 };
enum { gje3Pad_71 = 0 };
enum { gje3Pad_72 = 0 };
enum { gje3Pad_73 = 0 };
enum { gje3Pad_74 = 0 };
enum { gje3Pad_75 = 0 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 0 };
enum { gje3Pad_79 = 0 };
enum { gje3Pad_80 = 0 };
enum { gje3Pad_81 = 0 };
enum { gje3Pad_82 = 0 };
enum { gje3Pad_83 = 0 };
enum { gje3Pad_84 = 0 };
enum { gje3Pad_85 = 0 };
enum { gje3Pad_86 = 0 };
enum { gje3Pad_87 = 0 };
enum { gje3Pad_88 = 0 };
enum { gje3Pad_89 = 0 };
enum { gje3Pad_90 = 0 };
enum { gje3Pad_91 = 0 };
enum { gje3Pad_92 = 0 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 0 };
enum { gje3Pad_95 = 0 };
enum { gje3Pad_96 = 0 };
enum { gje3Pad_97 = 0 };
enum { gje3Pad_98 = 0 };
enum { gje3Pad_99 = 0 };
enum { gje3Pad_100 = 0 };
enum { gje3Pad_101 = 0 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 0 };
enum { gje3Pad_104 = 0 };
enum { gje3Pad_105 = 0 };
enum { gje3Pad_106 = 0 };
enum { gje3Pad_107 = 0 };
enum { gje3Pad_108 = 0 };
enum { gje3Pad_109 = 0 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 1 };
enum { gje3SrchThrd_03 = 1 };
enum { gje3SrchThrd_04 = 1 };
enum { gje3SrchThrd_05 = 1 };
enum { gje3SrchThrd_06 = 1 };
enum { gje3SrchThrd_07 = 1 };
enum { gje3SrchThrd_08 = 1 };
enum { gje3SrchThrd_09 = 1 };
enum { gje3SrchThrd_10 = 1 };
enum { gje3SrchThrd_11 = 1 };
enum { gje3SrchThrd_12 = 1 };
enum { gje3SrchThrd_13 = 1 };
enum { gje3SrchThrd_14 = 1 };
enum { gje3SrchThrd_15 = 1 };
enum { gje3SrchThrd_16 = 1 };
enum { gje3SrchThrd_17 = 1 };
enum { gje3SrchThrd_18 = 1 };
enum { gje3SrchThrd_19 = 1 };
enum { gje3SrchThrd_20 = 1 };
enum { gje3SrchThrd_21 = 1 };
enum { gje3SrchThrd_22 = 1 };
enum { gje3SrchThrd_23 = 1 };
enum { gje3SrchThrd_24 = 1 };
enum { gje3SrchThrd_25 = 1 };
enum { gje3SrchThrd_26 = 1 };
enum { gje3SrchThrd_27 = 1 };
enum { gje3SrchThrd_28 = 1 };
enum { gje3SrchThrd_29 = 1 };
enum { gje3SrchThrd_30 = 1 };
enum { gje3SrchThrd_31 = 2 };
enum { gje3SrchThrd_32 = 2 };
enum { gje3SrchThrd_33 = 2 };
enum { gje3SrchThrd_34 = 2 };
enum { gje3SrchThrd_35 = 2 };
enum { gje3SrchThrd_36 = 3 };
enum { gje3SrchThrd_37 = 3 };
enum { gje3SrchThrd_38 = 3 };
enum { gje3SrchThrd_39 = 3 };
enum { gje3SrchThrd_40 = 3 };
enum { gje3SrchThrd_41 = 3 };
enum { gje3SrchThrd_42 = 3 };
enum { gje3SrchThrd_43 = 3 };
enum { gje3SrchThrd_44 = 3 };
enum { gje3SrchThrd_45 = 3 };
enum { gje3SrchThrd_46 = 3 };
enum { gje3SrchThrd_47 = 3 };
enum { gje3SrchThrd_48 = 3 };
enum { gje3SrchThrd_49 = 3 };
enum { gje3SrchThrd_50 = 3 };
enum { gje3SrchThrd_51 = 3 };
enum { gje3SrchThrd_52 = 3 };
enum { gje3SrchThrd_53 = 3 };
enum { gje3SrchThrd_54 = 3 };
enum { gje3SrchThrd_55 = 3 };
enum { gje3SrchThrd_56 = 3 };
enum { gje3SrchThrd_57 = 3 };
enum { gje3SrchThrd_58 = 3 };
enum { gje3SrchThrd_59 = 3 };
enum { gje3SrchThrd_60 = 3 };
enum { gje3SrchThrd_61 = 3 };
enum { gje3SrchThrd_62 = 3 };
enum { gje3SrchThrd_63 = -1 };
enum { gje3SrchThrd_64 = -1 };
enum { gje3SrchThrd_65 = -1 };
enum { gje3SrchThrd_66 = -1 };
enum { gje3SrchThrd_67 = -1 };
enum { gje3SrchThrd_68 = -1 };
enum { gje3SrchThrd_69 = -1 };
enum { gje3SrchThrd_70 = -1 };
enum { gje3SrchThrd_71 = -1 };
enum { gje3SrchThrd_72 = -1 };
enum { gje3SrchThrd_73 = -1 };
enum { gje3SrchThrd_74 = -1 };
enum { gje3SrchThrd_75 = -1 };
enum { gje3SrchThrd_76 = -1 };
enum { gje3SrchThrd_77 = -1 };
enum { gje3SrchThrd_78 = -1 };
enum { gje3SrchThrd_79 = -1 };
enum { gje3SrchThrd_80 = -1 };
enum { gje3SrchThrd_81 = -1 };
enum { gje3SrchThrd_82 = -1 };
enum { gje3SrchThrd_83 = -1 };
enum { gje3SrchThrd_84 = -1 };
enum { gje3SrchThrd_85 = -1 };
enum { gje3SrchThrd_86 = -1 };
enum { gje3SrchThrd_87 = -1 };
enum { gje3SrchThrd_88 = -1 };
enum { gje3SrchThrd_89 = -1 };
enum { gje3SrchThrd_90 = -1 };
enum { gje3SrchThrd_91 = -1 };
enum { gje3SrchThrd_92 = -1 };
enum { gje3SrchThrd_93 = -1 };
enum { gje3SrchThrd_94 = -1 };
enum { gje3SrchThrd_95 = -1 };
enum { gje3SrchThrd_96 = -1 };
enum { gje3SrchThrd_97 = -1 };
enum { gje3SrchThrd_98 = -1 };
enum { gje3SrchThrd_99 = -1 };
enum { gje3SrchThrd_100 = -1 };
enum { gje3SrchThrd_101 = -1 };
enum { gje3SrchThrd_102 = -1 };
enum { gje3SrchThrd_103 = -1 };
enum { gje3SrchThrd_104 = -1 };
enum { gje3SrchThrd_105 = -1 };
enum { gje3SrchThrd_106 = -1 };
enum { gje3SrchThrd_107 = -1 };
enum { gje3SrchThrd_108 = -1 };
enum { gje3SrchThrd_109 = -1 };
enum { matInv2x2MinBatch = 35000 };
enum { matInv3x3MinBatch = 45000 };
enum { matInv4x4MinBatch = 40000 };
enum { matInv5x5MinBatch = 25000 };
enum { matInv6x6MinBatch = 15000 };
enum { matInv7x7MinBatch = 11000 };
enum { matInv8x8MinBatch = 9500 };
enum { matInv9x9MinBatch = 9000 };
enum { matInv10x10MinBatch= 6000 };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 10 };
};
template<> class config<double,ARCH_SM13> {
public:
typedef double absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim = 44 };
enum { gje3MinBlks = 1 };
enum { gje3MaxThrds =768 }; /* sm_13, 21 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 3 };
enum { gje3DimX_06 = 2 };
enum { gje3DimX_07 = 2 };
enum { gje3DimX_08 = 2 };
enum { gje3DimX_09 = 3 };
enum { gje3DimX_10 = 3 };
enum { gje3DimX_11 = 2 };
enum { gje3DimX_12 = 4 };
enum { gje3DimX_13 = 2 };
enum { gje3DimX_14 = 2 };
enum { gje3DimX_15 = 2 };
enum { gje3DimX_16 = 2 };
enum { gje3DimX_17 = 2 };
enum { gje3DimX_18 = 2 };
enum { gje3DimX_19 = 3 };
enum { gje3DimX_20 = 4 };
enum { gje3DimX_21 = 3 };
enum { gje3DimX_22 = 4 };
enum { gje3DimX_23 = 2 };
enum { gje3DimX_24 = 2 };
enum { gje3DimX_25 = 5 };
enum { gje3DimX_26 = 4 };
enum { gje3DimX_27 = 4 };
enum { gje3DimX_28 = 4 };
enum { gje3DimX_29 = 5 };
enum { gje3DimX_30 = 4 };
enum { gje3DimX_31 = 2 };
enum { gje3DimX_32 = 8 };
enum { gje3DimX_33 = 7 };
enum { gje3DimX_34 = 7 };
enum { gje3DimX_35 = 7 };
enum { gje3DimX_36 = 8 };
enum { gje3DimX_37 = 8 };
enum { gje3DimX_38 = 8 };
enum { gje3DimX_39 = 8 };
enum { gje3DimX_40 = 8 };
enum { gje3DimX_41 = 7 };
enum { gje3DimX_42 = 6 };
enum { gje3DimX_43 = 8 };
enum { gje3DimX_44 = 8 };
enum { gje3DimX_45 = -1 };
enum { gje3DimX_46 = -1 };
enum { gje3DimX_47 = -1 };
enum { gje3DimX_48 = -1 };
enum { gje3DimX_49 = -1 };
enum { gje3DimX_50 = -1 };
enum { gje3DimX_51 = -1 };
enum { gje3DimX_52 = -1 };
enum { gje3DimX_53 = -1 };
enum { gje3DimX_54 = -1 };
enum { gje3DimX_55 = -1 };
enum { gje3DimX_56 = -1 };
enum { gje3DimX_57 = -1 };
enum { gje3DimX_58 = -1 };
enum { gje3DimX_59 = -1 };
enum { gje3DimX_60 = -1 };
enum { gje3DimX_61 = -1 };
enum { gje3DimX_62 = -1 };
enum { gje3DimX_63 = -1 };
enum { gje3DimX_64 = -1 };
enum { gje3DimX_65 = -1 };
enum { gje3DimX_66 = -1 };
enum { gje3DimX_67 = -1 };
enum { gje3DimX_68 = -1 };
enum { gje3DimX_69 = -1 };
enum { gje3DimX_70 = -1 };
enum { gje3DimX_71 = -1 };
enum { gje3DimX_72 = -1 };
enum { gje3DimX_73 = -1 };
enum { gje3DimX_74 = -1 };
enum { gje3DimX_75 = -1 };
enum { gje3DimX_76 = -1 };
enum { gje3DimX_77 = -1 };
enum { gje3DimX_78 = -1 };
enum { gje3DimX_79 = -1 };
enum { gje3DimX_80 = -1 };
enum { gje3DimX_81 = -1 };
enum { gje3DimX_82 = -1 };
enum { gje3DimX_83 = -1 };
enum { gje3DimX_84 = -1 };
enum { gje3DimX_85 = -1 };
enum { gje3DimX_86 = -1 };
enum { gje3DimX_87 = -1 };
enum { gje3DimX_88 = -1 };
enum { gje3DimX_89 = -1 };
enum { gje3DimX_90 = -1 };
enum { gje3DimX_91 = -1 };
enum { gje3DimX_92 = -1 };
enum { gje3DimX_93 = -1 };
enum { gje3DimX_94 = -1 };
enum { gje3DimX_95 = -1 };
enum { gje3DimX_96 = -1 };
enum { gje3DimX_97 = -1 };
enum { gje3DimX_98 = -1 };
enum { gje3DimX_99 = -1 };
enum { gje3DimX_100 = -1 };
enum { gje3DimX_101 = -1 };
enum { gje3DimX_102 = -1 };
enum { gje3DimX_103 = -1 };
enum { gje3DimX_104 = -1 };
enum { gje3DimX_105 = -1 };
enum { gje3DimX_106 = -1 };
enum { gje3DimX_107 = -1 };
enum { gje3DimX_108 = -1 };
enum { gje3DimX_109 = -1 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 0 };
enum { gje3Pad_03 = 0 };
enum { gje3Pad_04 = 2 };
enum { gje3Pad_05 = 0 };
enum { gje3Pad_06 = 1 };
enum { gje3Pad_07 = 4 };
enum { gje3Pad_08 = 3 };
enum { gje3Pad_09 = 2 };
enum { gje3Pad_10 = 1 };
enum { gje3Pad_11 = 2 };
enum { gje3Pad_12 = 2 };
enum { gje3Pad_13 = 2 };
enum { gje3Pad_14 = 1 };
enum { gje3Pad_15 = 0 };
enum { gje3Pad_16 = 1 };
enum { gje3Pad_17 = 0 };
enum { gje3Pad_18 = 1 };
enum { gje3Pad_19 = 2 };
enum { gje3Pad_20 = 2 };
enum { gje3Pad_21 = 0 };
enum { gje3Pad_22 = 4 };
enum { gje3Pad_23 = 2 };
enum { gje3Pad_24 = 1 };
enum { gje3Pad_25 = 4 };
enum { gje3Pad_26 = 4 };
enum { gje3Pad_27 = 3 };
enum { gje3Pad_28 = 2 };
enum { gje3Pad_29 = 0 };
enum { gje3Pad_30 = 0 };
enum { gje3Pad_31 = 0 };
enum { gje3Pad_32 = 1 };
enum { gje3Pad_33 = 2 };
enum { gje3Pad_34 = 1 };
enum { gje3Pad_35 = 4 };
enum { gje3Pad_36 = 3 };
enum { gje3Pad_37 = 1 };
enum { gje3Pad_38 = 3 };
enum { gje3Pad_39 = 2 };
enum { gje3Pad_40 = 1 };
enum { gje3Pad_41 = 2 };
enum { gje3Pad_42 = 4 };
enum { gje3Pad_43 = 2 };
enum { gje3Pad_44 = 1 };
enum { gje3Pad_45 = 0 };
enum { gje3Pad_46 = 0 };
enum { gje3Pad_47 = 0 };
enum { gje3Pad_48 = 0 };
enum { gje3Pad_49 = 0 };
enum { gje3Pad_50 = 0 };
enum { gje3Pad_51 = 0 };
enum { gje3Pad_52 = 0 };
enum { gje3Pad_53 = 0 };
enum { gje3Pad_54 = 0 };
enum { gje3Pad_55 = 0 };
enum { gje3Pad_56 = 0 };
enum { gje3Pad_57 = 0 };
enum { gje3Pad_58 = 0 };
enum { gje3Pad_59 = 0 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 0 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 0 };
enum { gje3Pad_64 = 0 };
enum { gje3Pad_65 = 0 };
enum { gje3Pad_66 = 0 };
enum { gje3Pad_67 = 0 };
enum { gje3Pad_68 = 0 };
enum { gje3Pad_69 = 0 };
enum { gje3Pad_70 = 0 };
enum { gje3Pad_71 = 0 };
enum { gje3Pad_72 = 0 };
enum { gje3Pad_73 = 0 };
enum { gje3Pad_74 = 0 };
enum { gje3Pad_75 = 0 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 0 };
enum { gje3Pad_79 = 0 };
enum { gje3Pad_80 = 0 };
enum { gje3Pad_81 = 0 };
enum { gje3Pad_82 = 0 };
enum { gje3Pad_83 = 0 };
enum { gje3Pad_84 = 0 };
enum { gje3Pad_85 = 0 };
enum { gje3Pad_86 = 0 };
enum { gje3Pad_87 = 0 };
enum { gje3Pad_88 = 0 };
enum { gje3Pad_89 = 0 };
enum { gje3Pad_90 = 0 };
enum { gje3Pad_91 = 0 };
enum { gje3Pad_92 = 0 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 0 };
enum { gje3Pad_95 = 0 };
enum { gje3Pad_96 = 0 };
enum { gje3Pad_97 = 0 };
enum { gje3Pad_98 = 0 };
enum { gje3Pad_99 = 0 };
enum { gje3Pad_100 = 0 };
enum { gje3Pad_101 = 0 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 0 };
enum { gje3Pad_104 = 0 };
enum { gje3Pad_105 = 0 };
enum { gje3Pad_106 = 0 };
enum { gje3Pad_107 = 0 };
enum { gje3Pad_108 = 0 };
enum { gje3Pad_109 = 0 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 1 };
enum { gje3SrchThrd_03 = 2 };
enum { gje3SrchThrd_04 = 2 };
enum { gje3SrchThrd_05 = 2 };
enum { gje3SrchThrd_06 = 2 };
enum { gje3SrchThrd_07 = 2 };
enum { gje3SrchThrd_08 = 2 };
enum { gje3SrchThrd_09 = 2 };
enum { gje3SrchThrd_10 = 2 };
enum { gje3SrchThrd_11 = 2 };
enum { gje3SrchThrd_12 = 2 };
enum { gje3SrchThrd_13 = 3 };
enum { gje3SrchThrd_14 = 3 };
enum { gje3SrchThrd_15 = 3 };
enum { gje3SrchThrd_16 = 3 };
enum { gje3SrchThrd_17 = 3 };
enum { gje3SrchThrd_18 = 3 };
enum { gje3SrchThrd_19 = 3 };
enum { gje3SrchThrd_20 = 3 };
enum { gje3SrchThrd_21 = 3 };
enum { gje3SrchThrd_22 = 3 };
enum { gje3SrchThrd_23 = 3 };
enum { gje3SrchThrd_24 = 3 };
enum { gje3SrchThrd_25 = 3 };
enum { gje3SrchThrd_26 = 3 };
enum { gje3SrchThrd_27 = 3 };
enum { gje3SrchThrd_28 = 3 };
enum { gje3SrchThrd_29 = 3 };
enum { gje3SrchThrd_30 = 3 };
enum { gje3SrchThrd_31 = 3 };
enum { gje3SrchThrd_32 = 3 };
enum { gje3SrchThrd_33 = 3 };
enum { gje3SrchThrd_34 = 3 };
enum { gje3SrchThrd_35 = 3 };
enum { gje3SrchThrd_36 = 4 };
enum { gje3SrchThrd_37 = 4 };
enum { gje3SrchThrd_38 = 4 };
enum { gje3SrchThrd_39 = 4 };
enum { gje3SrchThrd_40 = 4 };
enum { gje3SrchThrd_41 = 4 };
enum { gje3SrchThrd_42 = 4 };
enum { gje3SrchThrd_43 = 4 };
enum { gje3SrchThrd_44 = 4 };
enum { gje3SrchThrd_45 = -1 };
enum { gje3SrchThrd_46 = -1 };
enum { gje3SrchThrd_47 = -1 };
enum { gje3SrchThrd_48 = -1 };
enum { gje3SrchThrd_49 = -1 };
enum { gje3SrchThrd_50 = -1 };
enum { gje3SrchThrd_51 = -1 };
enum { gje3SrchThrd_52 = -1 };
enum { gje3SrchThrd_53 = -1 };
enum { gje3SrchThrd_54 = -1 };
enum { gje3SrchThrd_55 = -1 };
enum { gje3SrchThrd_56 = -1 };
enum { gje3SrchThrd_57 = -1 };
enum { gje3SrchThrd_58 = -1 };
enum { gje3SrchThrd_59 = -1 };
enum { gje3SrchThrd_60 = -1 };
enum { gje3SrchThrd_61 = -1 };
enum { gje3SrchThrd_62 = -1 };
enum { gje3SrchThrd_63 = -1 };
enum { gje3SrchThrd_64 = -1 };
enum { gje3SrchThrd_65 = -1 };
enum { gje3SrchThrd_66 = -1 };
enum { gje3SrchThrd_67 = -1 };
enum { gje3SrchThrd_68 = -1 };
enum { gje3SrchThrd_69 = -1 };
enum { gje3SrchThrd_70 = -1 };
enum { gje3SrchThrd_71 = -1 };
enum { gje3SrchThrd_72 = -1 };
enum { gje3SrchThrd_73 = -1 };
enum { gje3SrchThrd_74 = -1 };
enum { gje3SrchThrd_75 = -1 };
enum { gje3SrchThrd_76 = -1 };
enum { gje3SrchThrd_77 = -1 };
enum { gje3SrchThrd_78 = -1 };
enum { gje3SrchThrd_79 = -1 };
enum { gje3SrchThrd_80 = -1 };
enum { gje3SrchThrd_81 = -1 };
enum { gje3SrchThrd_82 = -1 };
enum { gje3SrchThrd_83 = -1 };
enum { gje3SrchThrd_84 = -1 };
enum { gje3SrchThrd_85 = -1 };
enum { gje3SrchThrd_86 = -1 };
enum { gje3SrchThrd_87 = -1 };
enum { gje3SrchThrd_88 = -1 };
enum { gje3SrchThrd_89 = -1 };
enum { gje3SrchThrd_90 = -1 };
enum { gje3SrchThrd_91 = -1 };
enum { gje3SrchThrd_92 = -1 };
enum { gje3SrchThrd_93 = -1 };
enum { gje3SrchThrd_94 = -1 };
enum { gje3SrchThrd_95 = -1 };
enum { gje3SrchThrd_96 = -1 };
enum { gje3SrchThrd_97 = -1 };
enum { gje3SrchThrd_98 = -1 };
enum { gje3SrchThrd_99 = -1 };
enum { gje3SrchThrd_100 = -1 };
enum { gje3SrchThrd_101 = -1 };
enum { gje3SrchThrd_102 = -1 };
enum { gje3SrchThrd_103 = -1 };
enum { gje3SrchThrd_104 = -1 };
enum { gje3SrchThrd_105 = -1 };
enum { gje3SrchThrd_106 = -1 };
enum { gje3SrchThrd_107 = -1 };
enum { gje3SrchThrd_108 = -1 };
enum { gje3SrchThrd_109 = -1 };
enum { matInv2x2MinBatch = 40000 };
enum { matInv3x3MinBatch = 28000 };
enum { matInv4x4MinBatch = 17000 };
enum { matInv5x5MinBatch = 14000 };
enum { matInv6x6MinBatch = 11000 };
enum { matInv7x7MinBatch = 8500 };
enum { matInv8x8MinBatch = 13000 };
enum { matInv9x9MinBatch = 17000 };
enum { matInv10x10MinBatch= 30000 };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 10 };
};
template<> class config<cuComplex,ARCH_SM13> {
public:
typedef float absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim = 44 };
enum { gje3MinBlks = 1 };
enum { gje3MaxThrds = 832 }; /* sm_13, 19 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 3 };
enum { gje3DimX_06 = 2 };
enum { gje3DimX_07 = 2 };
enum { gje3DimX_08 = 2 };
enum { gje3DimX_09 = 3 };
enum { gje3DimX_10 = 3 };
enum { gje3DimX_11 = 2 };
enum { gje3DimX_12 = 4 };
enum { gje3DimX_13 = 2 };
enum { gje3DimX_14 = 2 };
enum { gje3DimX_15 = 2 };
enum { gje3DimX_16 = 2 };
enum { gje3DimX_17 = 2 };
enum { gje3DimX_18 = 2 };
enum { gje3DimX_19 = 2 };
enum { gje3DimX_20 = 4 };
enum { gje3DimX_21 = 2 };
enum { gje3DimX_22 = 8 };
enum { gje3DimX_23 = 2 };
enum { gje3DimX_24 = 8 };
enum { gje3DimX_25 = 8 };
enum { gje3DimX_26 = 8 };
enum { gje3DimX_27 = 7 };
enum { gje3DimX_28 = 8 };
enum { gje3DimX_29 = 8 };
enum { gje3DimX_30 = 8 };
enum { gje3DimX_31 = 8 };
enum { gje3DimX_32 = 8 };
enum { gje3DimX_33 = 8 };
enum { gje3DimX_34 = 8 };
enum { gje3DimX_35 = 8 };
enum { gje3DimX_36 = 8 };
enum { gje3DimX_37 = 8 };
enum { gje3DimX_38 = 8 };
enum { gje3DimX_39 = 8 };
enum { gje3DimX_40 = 8 };
enum { gje3DimX_41 = 8 };
enum { gje3DimX_42 = 8 };
enum { gje3DimX_43 = 8 };
enum { gje3DimX_44 = 8 };
enum { gje3DimX_45 = -1 };
enum { gje3DimX_46 = -1 };
enum { gje3DimX_47 = -1 };
enum { gje3DimX_48 = -1 };
enum { gje3DimX_49 = -1 };
enum { gje3DimX_50 = -1 };
enum { gje3DimX_51 = -1 };
enum { gje3DimX_52 = -1 };
enum { gje3DimX_53 = -1 };
enum { gje3DimX_54 = -1 };
enum { gje3DimX_55 = -1 };
enum { gje3DimX_56 = -1 };
enum { gje3DimX_57 = -1 };
enum { gje3DimX_58 = -1 };
enum { gje3DimX_59 = -1 };
enum { gje3DimX_60 = -1 };
enum { gje3DimX_61 = -1 };
enum { gje3DimX_62 = -1 };
enum { gje3DimX_63 = -1 };
enum { gje3DimX_64 = -1 };
enum { gje3DimX_65 = -1 };
enum { gje3DimX_66 = -1 };
enum { gje3DimX_67 = -1 };
enum { gje3DimX_68 = -1 };
enum { gje3DimX_69 = -1 };
enum { gje3DimX_70 = -1 };
enum { gje3DimX_71 = -1 };
enum { gje3DimX_72 = -1 };
enum { gje3DimX_73 = -1 };
enum { gje3DimX_74 = -1 };
enum { gje3DimX_75 = -1 };
enum { gje3DimX_76 = -1 };
enum { gje3DimX_77 = -1 };
enum { gje3DimX_78 = -1 };
enum { gje3DimX_79 = -1 };
enum { gje3DimX_80 = -1 };
enum { gje3DimX_81 = -1 };
enum { gje3DimX_82 = -1 };
enum { gje3DimX_83 = -1 };
enum { gje3DimX_84 = -1 };
enum { gje3DimX_85 = -1 };
enum { gje3DimX_86 = -1 };
enum { gje3DimX_87 = -1 };
enum { gje3DimX_88 = -1 };
enum { gje3DimX_89 = -1 };
enum { gje3DimX_90 = -1 };
enum { gje3DimX_91 = -1 };
enum { gje3DimX_92 = -1 };
enum { gje3DimX_93 = -1 };
enum { gje3DimX_94 = -1 };
enum { gje3DimX_95 = -1 };
enum { gje3DimX_96 = -1 };
enum { gje3DimX_97 = -1 };
enum { gje3DimX_98 = -1 };
enum { gje3DimX_99 = -1 };
enum { gje3DimX_100 = -1 };
enum { gje3DimX_101 = -1 };
enum { gje3DimX_102 = -1 };
enum { gje3DimX_103 = -1 };
enum { gje3DimX_104 = -1 };
enum { gje3DimX_105 = -1 };
enum { gje3DimX_106 = -1 };
enum { gje3DimX_107 = -1 };
enum { gje3DimX_108 = -1 };
enum { gje3DimX_109 = -1 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 0 };
enum { gje3Pad_03 = 0 };
enum { gje3Pad_04 = 2 };
enum { gje3Pad_05 = 0 };
enum { gje3Pad_06 = 1 };
enum { gje3Pad_07 = 2 };
enum { gje3Pad_08 = 1 };
enum { gje3Pad_09 = 2 };
enum { gje3Pad_10 = 1 };
enum { gje3Pad_11 = 2 };
enum { gje3Pad_12 = 2 };
enum { gje3Pad_13 = 2 };
enum { gje3Pad_14 = 1 };
enum { gje3Pad_15 = 0 };
enum { gje3Pad_16 = 1 };
enum { gje3Pad_17 = 0 };
enum { gje3Pad_18 = 1 };
enum { gje3Pad_19 = 2 };
enum { gje3Pad_20 = 2 };
enum { gje3Pad_21 = 0 };
enum { gje3Pad_22 = 1 };
enum { gje3Pad_23 = 2 };
enum { gje3Pad_24 = 1 };
enum { gje3Pad_25 = 1 };
enum { gje3Pad_26 = 1 };
enum { gje3Pad_27 = 4 };
enum { gje3Pad_28 = 1 };
enum { gje3Pad_29 = 2 };
enum { gje3Pad_30 = 1 };
enum { gje3Pad_31 = 0 };
enum { gje3Pad_32 = 1 };
enum { gje3Pad_33 = 1 };
enum { gje3Pad_34 = 1 };
enum { gje3Pad_35 = 2 };
enum { gje3Pad_36 = 2 };
enum { gje3Pad_37 = 1 };
enum { gje3Pad_38 = 1 };
enum { gje3Pad_39 = 2 };
enum { gje3Pad_40 = 1 };
enum { gje3Pad_41 = 2 };
enum { gje3Pad_42 = 4 };
enum { gje3Pad_43 = 2 };
enum { gje3Pad_44 = 1 };
enum { gje3Pad_45 = 0 };
enum { gje3Pad_46 = 0 };
enum { gje3Pad_47 = 0 };
enum { gje3Pad_48 = 0 };
enum { gje3Pad_49 = 0 };
enum { gje3Pad_50 = 0 };
enum { gje3Pad_51 = 0 };
enum { gje3Pad_52 = 0 };
enum { gje3Pad_53 = 0 };
enum { gje3Pad_54 = 0 };
enum { gje3Pad_55 = 0 };
enum { gje3Pad_56 = 0 };
enum { gje3Pad_57 = 0 };
enum { gje3Pad_58 = 0 };
enum { gje3Pad_59 = 0 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 0 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 0 };
enum { gje3Pad_64 = 0 };
enum { gje3Pad_65 = 0 };
enum { gje3Pad_66 = 0 };
enum { gje3Pad_67 = 0 };
enum { gje3Pad_68 = 0 };
enum { gje3Pad_69 = 0 };
enum { gje3Pad_70 = 0 };
enum { gje3Pad_71 = 0 };
enum { gje3Pad_72 = 0 };
enum { gje3Pad_73 = 0 };
enum { gje3Pad_74 = 0 };
enum { gje3Pad_75 = 0 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 0 };
enum { gje3Pad_79 = 0 };
enum { gje3Pad_80 = 0 };
enum { gje3Pad_81 = 0 };
enum { gje3Pad_82 = 0 };
enum { gje3Pad_83 = 0 };
enum { gje3Pad_84 = 0 };
enum { gje3Pad_85 = 0 };
enum { gje3Pad_86 = 0 };
enum { gje3Pad_87 = 0 };
enum { gje3Pad_88 = 0 };
enum { gje3Pad_89 = 0 };
enum { gje3Pad_90 = 0 };
enum { gje3Pad_91 = 0 };
enum { gje3Pad_92 = 0 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 0 };
enum { gje3Pad_95 = 0 };
enum { gje3Pad_96 = 0 };
enum { gje3Pad_97 = 0 };
enum { gje3Pad_98 = 0 };
enum { gje3Pad_99 = 0 };
enum { gje3Pad_100 = 0 };
enum { gje3Pad_101 = 0 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 0 };
enum { gje3Pad_104 = 0 };
enum { gje3Pad_105 = 0 };
enum { gje3Pad_106 = 0 };
enum { gje3Pad_107 = 0 };
enum { gje3Pad_108 = 0 };
enum { gje3Pad_109 = 0 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 2 };
enum { gje3SrchThrd_03 = 2 };
enum { gje3SrchThrd_04 = 2 };
enum { gje3SrchThrd_05 = 2 };
enum { gje3SrchThrd_06 = 2 };
enum { gje3SrchThrd_07 = 2 };
enum { gje3SrchThrd_08 = 2 };
enum { gje3SrchThrd_09 = 2 };
enum { gje3SrchThrd_10 = 3 };
enum { gje3SrchThrd_11 = 3 };
enum { gje3SrchThrd_12 = 3 };
enum { gje3SrchThrd_13 = 3 };
enum { gje3SrchThrd_14 = 3 };
enum { gje3SrchThrd_15 = 3 };
enum { gje3SrchThrd_16 = 3 };
enum { gje3SrchThrd_17 = 3 };
enum { gje3SrchThrd_18 = 3 };
enum { gje3SrchThrd_19 = 3 };
enum { gje3SrchThrd_20 = 3 };
enum { gje3SrchThrd_21 = 3 };
enum { gje3SrchThrd_22 = 3 };
enum { gje3SrchThrd_23 = 3 };
enum { gje3SrchThrd_24 = 3 };
enum { gje3SrchThrd_25 = 3 };
enum { gje3SrchThrd_26 = 3 };
enum { gje3SrchThrd_27 = 3 };
enum { gje3SrchThrd_28 = 3 };
enum { gje3SrchThrd_29 = 3 };
enum { gje3SrchThrd_30 = 3 };
enum { gje3SrchThrd_31 = 4 };
enum { gje3SrchThrd_32 = 4 };
enum { gje3SrchThrd_33 = 4 };
enum { gje3SrchThrd_34 = 4 };
enum { gje3SrchThrd_35 = 4 };
enum { gje3SrchThrd_36 = 4 };
enum { gje3SrchThrd_37 = 4 };
enum { gje3SrchThrd_38 = 4 };
enum { gje3SrchThrd_39 = 4 };
enum { gje3SrchThrd_40 = 4 };
enum { gje3SrchThrd_41 = 4 };
enum { gje3SrchThrd_42 = 4 };
enum { gje3SrchThrd_43 = 4 };
enum { gje3SrchThrd_44 = 4 };
enum { gje3SrchThrd_45 = -1 };
enum { gje3SrchThrd_46 = -1 };
enum { gje3SrchThrd_47 = -1 };
enum { gje3SrchThrd_48 = -1 };
enum { gje3SrchThrd_49 = -1 };
enum { gje3SrchThrd_50 = -1 };
enum { gje3SrchThrd_51 = -1 };
enum { gje3SrchThrd_52 = -1 };
enum { gje3SrchThrd_53 = -1 };
enum { gje3SrchThrd_54 = -1 };
enum { gje3SrchThrd_55 = -1 };
enum { gje3SrchThrd_56 = -1 };
enum { gje3SrchThrd_57 = -1 };
enum { gje3SrchThrd_58 = -1 };
enum { gje3SrchThrd_59 = -1 };
enum { gje3SrchThrd_60 = -1 };
enum { gje3SrchThrd_61 = -1 };
enum { gje3SrchThrd_62 = -1 };
enum { gje3SrchThrd_63 = -1 };
enum { gje3SrchThrd_64 = -1 };
enum { gje3SrchThrd_65 = -1 };
enum { gje3SrchThrd_66 = -1 };
enum { gje3SrchThrd_67 = -1 };
enum { gje3SrchThrd_68 = -1 };
enum { gje3SrchThrd_69 = -1 };
enum { gje3SrchThrd_70 = -1 };
enum { gje3SrchThrd_71 = -1 };
enum { gje3SrchThrd_72 = -1 };
enum { gje3SrchThrd_73 = -1 };
enum { gje3SrchThrd_74 = -1 };
enum { gje3SrchThrd_75 = -1 };
enum { gje3SrchThrd_76 = -1 };
enum { gje3SrchThrd_77 = -1 };
enum { gje3SrchThrd_78 = -1 };
enum { gje3SrchThrd_79 = -1 };
enum { gje3SrchThrd_80 = -1 };
enum { gje3SrchThrd_81 = -1 };
enum { gje3SrchThrd_82 = -1 };
enum { gje3SrchThrd_83 = -1 };
enum { gje3SrchThrd_84 = -1 };
enum { gje3SrchThrd_85 = -1 };
enum { gje3SrchThrd_86 = -1 };
enum { gje3SrchThrd_87 = -1 };
enum { gje3SrchThrd_88 = -1 };
enum { gje3SrchThrd_89 = -1 };
enum { gje3SrchThrd_90 = -1 };
enum { gje3SrchThrd_91 = -1 };
enum { gje3SrchThrd_92 = -1 };
enum { gje3SrchThrd_93 = -1 };
enum { gje3SrchThrd_94 = -1 };
enum { gje3SrchThrd_95 = -1 };
enum { gje3SrchThrd_96 = -1 };
enum { gje3SrchThrd_97 = -1 };
enum { gje3SrchThrd_98 = -1 };
enum { gje3SrchThrd_99 = -1 };
enum { gje3SrchThrd_100 = -1 };
enum { gje3SrchThrd_101 = -1 };
enum { gje3SrchThrd_102 = -1 };
enum { gje3SrchThrd_103 = -1 };
enum { gje3SrchThrd_104 = -1 };
enum { gje3SrchThrd_105 = -1 };
enum { gje3SrchThrd_106 = -1 };
enum { gje3SrchThrd_107 = -1 };
enum { gje3SrchThrd_108 = -1 };
enum { gje3SrchThrd_109 = -1 };
enum { matInv2x2MinBatch = 35000 };
enum { matInv3x3MinBatch = 35000 };
enum { matInv4x4MinBatch = 20000 };
enum { matInv5x5MinBatch = 11000 };
enum { matInv6x6MinBatch = 9000 };
enum { matInv7x7MinBatch = 7000 };
enum { matInv8x8MinBatch = 25000 };
enum { matInv9x9MinBatch = 0x7fffffff };
enum { matInv10x10MinBatch= 0x7fffffff };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 8 };
};
template<> class config<cuDoubleComplex,ARCH_SM13> {
public:
typedef double absValType;
enum { gje3MinDim = 2 };
enum { gje3MaxDim = 31 };
enum { gje3MinBlks = 1 };
enum { gje3MaxThrds = 640 }; /* sm_13, 25 registers per thread */
enum { gje3DimX_00 = -1 };
enum { gje3DimX_01 = -1 };
enum { gje3DimX_02 = 2 };
enum { gje3DimX_03 = 3 };
enum { gje3DimX_04 = 4 };
enum { gje3DimX_05 = 3 };
enum { gje3DimX_06 = 2 };
enum { gje3DimX_07 = 2 };
enum { gje3DimX_08 = 2 };
enum { gje3DimX_09 = 3 };
enum { gje3DimX_10 = 3 };
enum { gje3DimX_11 = 3 };
enum { gje3DimX_12 = 4 };
enum { gje3DimX_13 = 3 };
enum { gje3DimX_14 = 3 };
enum { gje3DimX_15 = 3 };
enum { gje3DimX_16 = 4 };
enum { gje3DimX_17 = 4 };
enum { gje3DimX_18 = 4 };
enum { gje3DimX_19 = 4 };
enum { gje3DimX_20 = 4 };
enum { gje3DimX_21 = 5 };
enum { gje3DimX_22 = 5 };
enum { gje3DimX_23 = 6 };
enum { gje3DimX_24 = 6 };
enum { gje3DimX_25 = 5 };
enum { gje3DimX_26 = 6 };
enum { gje3DimX_27 = 7 };
enum { gje3DimX_28 = 4 };
enum { gje3DimX_29 = 6 };
enum { gje3DimX_30 = 8 };
enum { gje3DimX_31 = 4 };
enum { gje3DimX_32 = -1 };
enum { gje3DimX_33 = -1 };
enum { gje3DimX_34 = -1 };
enum { gje3DimX_35 = -1 };
enum { gje3DimX_36 = -1 };
enum { gje3DimX_37 = -1 };
enum { gje3DimX_38 = -1 };
enum { gje3DimX_39 = -1 };
enum { gje3DimX_40 = -1 };
enum { gje3DimX_41 = -1 };
enum { gje3DimX_42 = -1 };
enum { gje3DimX_43 = -1 };
enum { gje3DimX_44 = -1 };
enum { gje3DimX_45 = -1 };
enum { gje3DimX_46 = -1 };
enum { gje3DimX_47 = -1 };
enum { gje3DimX_48 = -1 };
enum { gje3DimX_49 = -1 };
enum { gje3DimX_50 = -1 };
enum { gje3DimX_51 = -1 };
enum { gje3DimX_52 = -1 };
enum { gje3DimX_53 = -1 };
enum { gje3DimX_54 = -1 };
enum { gje3DimX_55 = -1 };
enum { gje3DimX_56 = -1 };
enum { gje3DimX_57 = -1 };
enum { gje3DimX_58 = -1 };
enum { gje3DimX_59 = -1 };
enum { gje3DimX_60 = -1 };
enum { gje3DimX_61 = -1 };
enum { gje3DimX_62 = -1 };
enum { gje3DimX_63 = -1 };
enum { gje3DimX_64 = -1 };
enum { gje3DimX_65 = -1 };
enum { gje3DimX_66 = -1 };
enum { gje3DimX_67 = -1 };
enum { gje3DimX_68 = -1 };
enum { gje3DimX_69 = -1 };
enum { gje3DimX_70 = -1 };
enum { gje3DimX_71 = -1 };
enum { gje3DimX_72 = -1 };
enum { gje3DimX_73 = -1 };
enum { gje3DimX_74 = -1 };
enum { gje3DimX_75 = -1 };
enum { gje3DimX_76 = -1 };
enum { gje3DimX_77 = -1 };
enum { gje3DimX_78 = -1 };
enum { gje3DimX_79 = -1 };
enum { gje3DimX_80 = -1 };
enum { gje3DimX_81 = -1 };
enum { gje3DimX_82 = -1 };
enum { gje3DimX_83 = -1 };
enum { gje3DimX_84 = -1 };
enum { gje3DimX_85 = -1 };
enum { gje3DimX_86 = -1 };
enum { gje3DimX_87 = -1 };
enum { gje3DimX_88 = -1 };
enum { gje3DimX_89 = -1 };
enum { gje3DimX_90 = -1 };
enum { gje3DimX_91 = -1 };
enum { gje3DimX_92 = -1 };
enum { gje3DimX_93 = -1 };
enum { gje3DimX_94 = -1 };
enum { gje3DimX_95 = -1 };
enum { gje3DimX_96 = -1 };
enum { gje3DimX_97 = -1 };
enum { gje3DimX_98 = -1 };
enum { gje3DimX_99 = -1 };
enum { gje3DimX_100 = -1 };
enum { gje3DimX_101 = -1 };
enum { gje3DimX_102 = -1 };
enum { gje3DimX_103 = -1 };
enum { gje3DimX_104 = -1 };
enum { gje3DimX_105 = -1 };
enum { gje3DimX_106 = -1 };
enum { gje3DimX_107 = -1 };
enum { gje3DimX_108 = -1 };
enum { gje3DimX_109 = -1 };
enum { gje3Pad_00 = 0 };
enum { gje3Pad_01 = 0 };
enum { gje3Pad_02 = 0 };
enum { gje3Pad_03 = 0 };
enum { gje3Pad_04 = 1 };
enum { gje3Pad_05 = 0 };
enum { gje3Pad_06 = 1 };
enum { gje3Pad_07 = 0 };
enum { gje3Pad_08 = 1 };
enum { gje3Pad_09 = 2 };
enum { gje3Pad_10 = 1 };
enum { gje3Pad_11 = 0 };
enum { gje3Pad_12 = 1 };
enum { gje3Pad_13 = 0 };
enum { gje3Pad_14 = 1 };
enum { gje3Pad_15 = 0 };
enum { gje3Pad_16 = 1 };
enum { gje3Pad_17 = 0 };
enum { gje3Pad_18 = 1 };
enum { gje3Pad_19 = 0 };
enum { gje3Pad_20 = 1 };
enum { gje3Pad_21 = 0 };
enum { gje3Pad_22 = 0 };
enum { gje3Pad_23 = 0 };
enum { gje3Pad_24 = 1 };
enum { gje3Pad_25 = 0 };
enum { gje3Pad_26 = 0 };
enum { gje3Pad_27 = 0 };
enum { gje3Pad_28 = 1 };
enum { gje3Pad_29 = 0 };
enum { gje3Pad_30 = 0 };
enum { gje3Pad_31 = 0 };
enum { gje3Pad_32 = 0 };
enum { gje3Pad_33 = 0 };
enum { gje3Pad_34 = 0 };
enum { gje3Pad_35 = 0 };
enum { gje3Pad_36 = 0 };
enum { gje3Pad_37 = 0 };
enum { gje3Pad_38 = 0 };
enum { gje3Pad_39 = 0 };
enum { gje3Pad_40 = 0 };
enum { gje3Pad_41 = 0 };
enum { gje3Pad_42 = 0 };
enum { gje3Pad_43 = 0 };
enum { gje3Pad_44 = 0 };
enum { gje3Pad_45 = 0 };
enum { gje3Pad_46 = 0 };
enum { gje3Pad_47 = 0 };
enum { gje3Pad_48 = 0 };
enum { gje3Pad_49 = 0 };
enum { gje3Pad_50 = 0 };
enum { gje3Pad_51 = 0 };
enum { gje3Pad_52 = 0 };
enum { gje3Pad_53 = 0 };
enum { gje3Pad_54 = 0 };
enum { gje3Pad_55 = 0 };
enum { gje3Pad_56 = 0 };
enum { gje3Pad_57 = 0 };
enum { gje3Pad_58 = 0 };
enum { gje3Pad_59 = 0 };
enum { gje3Pad_60 = 0 };
enum { gje3Pad_61 = 0 };
enum { gje3Pad_62 = 0 };
enum { gje3Pad_63 = 0 };
enum { gje3Pad_64 = 0 };
enum { gje3Pad_65 = 0 };
enum { gje3Pad_66 = 0 };
enum { gje3Pad_67 = 0 };
enum { gje3Pad_68 = 0 };
enum { gje3Pad_69 = 0 };
enum { gje3Pad_70 = 0 };
enum { gje3Pad_71 = 0 };
enum { gje3Pad_72 = 0 };
enum { gje3Pad_73 = 0 };
enum { gje3Pad_74 = 0 };
enum { gje3Pad_75 = 0 };
enum { gje3Pad_76 = 0 };
enum { gje3Pad_77 = 0 };
enum { gje3Pad_78 = 0 };
enum { gje3Pad_79 = 0 };
enum { gje3Pad_80 = 0 };
enum { gje3Pad_81 = 0 };
enum { gje3Pad_82 = 0 };
enum { gje3Pad_83 = 0 };
enum { gje3Pad_84 = 0 };
enum { gje3Pad_85 = 0 };
enum { gje3Pad_86 = 0 };
enum { gje3Pad_87 = 0 };
enum { gje3Pad_88 = 0 };
enum { gje3Pad_89 = 0 };
enum { gje3Pad_90 = 0 };
enum { gje3Pad_91 = 0 };
enum { gje3Pad_92 = 0 };
enum { gje3Pad_93 = 0 };
enum { gje3Pad_94 = 0 };
enum { gje3Pad_95 = 0 };
enum { gje3Pad_96 = 0 };
enum { gje3Pad_97 = 0 };
enum { gje3Pad_98 = 0 };
enum { gje3Pad_99 = 0 };
enum { gje3Pad_100 = 0 };
enum { gje3Pad_101 = 0 };
enum { gje3Pad_102 = 0 };
enum { gje3Pad_103 = 0 };
enum { gje3Pad_104 = 0 };
enum { gje3Pad_105 = 0 };
enum { gje3Pad_106 = 0 };
enum { gje3Pad_107 = 0 };
enum { gje3Pad_108 = 0 };
enum { gje3Pad_109 = 0 };
enum { gje3SrchThrd_00 = -1 };
enum { gje3SrchThrd_01 = -1 };
enum { gje3SrchThrd_02 = 1 };
enum { gje3SrchThrd_03 = 2 };
enum { gje3SrchThrd_04 = 2 };
enum { gje3SrchThrd_05 = 2 };
enum { gje3SrchThrd_06 = 2 };
enum { gje3SrchThrd_07 = 2 };
enum { gje3SrchThrd_08 = 3 };
enum { gje3SrchThrd_09 = 3 };
enum { gje3SrchThrd_10 = 3 };
enum { gje3SrchThrd_11 = 3 };
enum { gje3SrchThrd_12 = 3 };
enum { gje3SrchThrd_13 = 3 };
enum { gje3SrchThrd_14 = 3 };
enum { gje3SrchThrd_15 = 3 };
enum { gje3SrchThrd_16 = 3 };
enum { gje3SrchThrd_17 = 3 };
enum { gje3SrchThrd_18 = 3 };
enum { gje3SrchThrd_19 = 3 };
enum { gje3SrchThrd_20 = 4 };
enum { gje3SrchThrd_21 = 4 };
enum { gje3SrchThrd_22 = 4 };
enum { gje3SrchThrd_23 = 4 };
enum { gje3SrchThrd_24 = 4 };
enum { gje3SrchThrd_25 = 4 };
enum { gje3SrchThrd_26 = 4 };
enum { gje3SrchThrd_27 = 4 };
enum { gje3SrchThrd_28 = 4 };
enum { gje3SrchThrd_29 = 4 };
enum { gje3SrchThrd_30 = 4 };
enum { gje3SrchThrd_31 = 4 };
enum { gje3SrchThrd_32 = -1 };
enum { gje3SrchThrd_33 = -1 };
enum { gje3SrchThrd_34 = -1 };
enum { gje3SrchThrd_35 = -1 };
enum { gje3SrchThrd_36 = -1 };
enum { gje3SrchThrd_37 = -1 };
enum { gje3SrchThrd_38 = -1 };
enum { gje3SrchThrd_39 = -1 };
enum { gje3SrchThrd_40 = -1 };
enum { gje3SrchThrd_41 = -1 };
enum { gje3SrchThrd_42 = -1 };
enum { gje3SrchThrd_43 = -1 };
enum { gje3SrchThrd_44 = -1 };
enum { gje3SrchThrd_45 = -1 };
enum { gje3SrchThrd_46 = -1 };
enum { gje3SrchThrd_47 = -1 };
enum { gje3SrchThrd_48 = -1 };
enum { gje3SrchThrd_49 = -1 };
enum { gje3SrchThrd_50 = -1 };
enum { gje3SrchThrd_51 = -1 };
enum { gje3SrchThrd_52 = -1 };
enum { gje3SrchThrd_53 = -1 };
enum { gje3SrchThrd_54 = -1 };
enum { gje3SrchThrd_55 = -1 };
enum { gje3SrchThrd_56 = -1 };
enum { gje3SrchThrd_57 = -1 };
enum { gje3SrchThrd_58 = -1 };
enum { gje3SrchThrd_59 = -1 };
enum { gje3SrchThrd_60 = -1 };
enum { gje3SrchThrd_61 = -1 };
enum { gje3SrchThrd_62 = -1 };
enum { gje3SrchThrd_63 = -1 };
enum { gje3SrchThrd_64 = -1 };
enum { gje3SrchThrd_65 = -1 };
enum { gje3SrchThrd_66 = -1 };
enum { gje3SrchThrd_67 = -1 };
enum { gje3SrchThrd_68 = -1 };
enum { gje3SrchThrd_69 = -1 };
enum { gje3SrchThrd_70 = -1 };
enum { gje3SrchThrd_71 = -1 };
enum { gje3SrchThrd_72 = -1 };
enum { gje3SrchThrd_73 = -1 };
enum { gje3SrchThrd_74 = -1 };
enum { gje3SrchThrd_75 = -1 };
enum { gje3SrchThrd_76 = -1 };
enum { gje3SrchThrd_77 = -1 };
enum { gje3SrchThrd_78 = -1 };
enum { gje3SrchThrd_79 = -1 };
enum { gje3SrchThrd_80 = -1 };
enum { gje3SrchThrd_81 = -1 };
enum { gje3SrchThrd_82 = -1 };
enum { gje3SrchThrd_83 = -1 };
enum { gje3SrchThrd_84 = -1 };
enum { gje3SrchThrd_85 = -1 };
enum { gje3SrchThrd_86 = -1 };
enum { gje3SrchThrd_87 = -1 };
enum { gje3SrchThrd_88 = -1 };
enum { gje3SrchThrd_89 = -1 };
enum { gje3SrchThrd_90 = -1 };
enum { gje3SrchThrd_91 = -1 };
enum { gje3SrchThrd_92 = -1 };
enum { gje3SrchThrd_93 = -1 };
enum { gje3SrchThrd_94 = -1 };
enum { gje3SrchThrd_95 = -1 };
enum { gje3SrchThrd_96 = -1 };
enum { gje3SrchThrd_97 = -1 };
enum { gje3SrchThrd_98 = -1 };
enum { gje3SrchThrd_99 = -1 };
enum { gje3SrchThrd_100 = -1 };
enum { gje3SrchThrd_101 = -1 };
enum { gje3SrchThrd_102 = -1 };
enum { gje3SrchThrd_103 = -1 };
enum { gje3SrchThrd_104 = -1 };
enum { gje3SrchThrd_105 = -1 };
enum { gje3SrchThrd_106 = -1 };
enum { gje3SrchThrd_107 = -1 };
enum { gje3SrchThrd_108 = -1 };
enum { gje3SrchThrd_109 = -1 };
enum { matInv2x2MinBatch = 30000 };
enum { matInv3x3MinBatch = 15000 };
enum { matInv4x4MinBatch = 11000 };
enum { matInv5x5MinBatch = 6000 };
enum { matInv6x6MinBatch = 11000 };
enum { matInv7x7MinBatch = 17000 };
enum { matInv8x8MinBatch = 0x7fffffff };
enum { matInv9x9MinBatch = 0x7fffffff };
enum { matInv10x10MinBatch= 0x7fffffff };
enum { matInvMinDim = 2 };
enum { matInvMaxDim = 7 };
};
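/*
 * Note on the config tables above (an interpretation; the file itself does not
 * spell this out): for each matrix dimension nn, gje3DimX_nn appears to give
 * the x-extent of the thread block used by the general Gauss-Jordan (gje3)
 * kernel, gje3Pad_nn the per-column shared-memory padding used to avoid bank
 * conflicts, and gje3SrchThrd_nn the number of threads that cooperate in the
 * pivot search; -1 marks dimensions outside [gje3MinDim, gje3MaxDim]. The
 * matInvNxNMinBatch entries look like the batch sizes above which the
 * dedicated matrix-per-thread kernels below pay off (0x7fffffff effectively
 * disables a size).
 */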
/* column-major */
#define As(row,col) As[(N+ofs)*(col)+(row)]
#define AsInv(row,col) AsInv[(N+ofs)*(col)+(row)]
#define Ainv(row,col) Ainv[(col)*N+(row)]
#define USE_PIVOTING 1
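/*
 * Example expansion of the indexing macros (illustrative): with N == 2 and
 * column-major storage, Ainv(1,0) expands to Ainv[0*2+1] == Ainv[1] and
 * Ainv(0,1) to Ainv[1*2+0] == Ainv[2]. The As/AsInv variants additionally
 * skip 'ofs' padding elements per column, presumably the gje3Pad_* values
 * above, in the shared-memory based kernels.
 */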
template<typename T, int arch>
__global__ void matinv_2x2_matrix_per_thread (const T *A, T *Ainv, int batch)
{
const int blkNum = blockIdx.y * gridDim.x + blockIdx.x;
const int thrdNum = blkNum * blockDim.x + threadIdx.x;
const int N = 2;
int perm0, perm1;
int icol0, icol1;
T AA00, AA01;
T AA10, AA11;
T tmp;
#if USE_PIVOTING
typename config<T,arch>::absValType t;
typename config<T,arch>::absValType p;
int i, pvt;
#endif
A += thrdNum * N * N;
Ainv += thrdNum * N * N;
if (thrdNum < batch) {
AA00 = A[0];
AA10 = A[1];
AA01 = A[2];
AA11 = A[3];
perm0 = 0;
perm1 = 1;
/****************** iteration 0 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA00);
pvt = 0;
t = absOp (AA10);
if (t > p) { p = t; pvt = 1; }
/* swap pivot row with row 0 */
if (pvt == 1) {
tmp = AA00; AA00 = AA10; AA10 = tmp;
tmp = AA01; AA01 = AA11; AA11 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm1; perm1 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA00);
icol0 = perm0;
AA00 = tmp;
AA01 = mulOp (tmp, AA01);
/* eliminate above and below current row */
tmp = AA10;
AA10 = mulOp (negOp(tmp), AA00);
AA11 = fmnaOp (tmp, AA01, AA11);
/****************** iteration 1 ***********/
/* scale current row */
tmp = rcpOp (AA11);
icol1 = perm1;
AA10 = mulOp (tmp, AA10);
AA11 = tmp;
/* eliminate above and below current row */
tmp = AA01;
AA00 = fmnaOp (tmp, AA10, AA00);
AA01 = mulOp (negOp(tmp), AA11);
/* sort columns into the correct order */
Ainv(0,icol0) = AA00;
Ainv(1,icol0) = AA10;
Ainv(0,icol1) = AA01;
Ainv(1,icol1) = AA11;
}
}
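/*
 * Minimal host-side launch sketch (an assumption, not part of this file):
 * one thread inverts one matrix, so a 1-D launch that covers the batch is
 * enough; threads with thrdNum >= batch exit via the guard above. The block
 * size 128, the device pointers A_d/Ainv_d, and the <cuComplex,ARCH_SM13>
 * instantiation are illustrative only.
 *
 *   dim3 block (128);
 *   dim3 grid  ((batch + block.x - 1) / block.x);
 *   hipLaunchKernelGGL (HIP_KERNEL_NAME(matinv_2x2_matrix_per_thread<cuComplex,ARCH_SM13>),
 *                       grid, block, 0, 0, A_d, Ainv_d, batch);
 */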
template<typename T, int arch>
__global__ void matinv_3x3_matrix_per_thread (const T *A, T *Ainv, int batch)
{
const int blkNum = blockIdx.y * gridDim.x + blockIdx.x;
const int thrdNum = blkNum * blockDim.x + threadIdx.x;
const int N = 3;
int perm0, perm1, perm2;
int icol0, icol1, icol2;
T AA00, AA01, AA02;
T AA10, AA11, AA12;
T AA20, AA21, AA22;
T tmp;
#if USE_PIVOTING
typename config<T,arch>::absValType t;
typename config<T,arch>::absValType p;
int i, pvt;
#endif
A += thrdNum * N * N;
Ainv += thrdNum * N * N;
if (thrdNum < batch) {
AA00 = A[0];
AA10 = A[1];
AA20 = A[2];
AA01 = A[3];
AA11 = A[4];
AA21 = A[5];
AA02 = A[6];
AA12 = A[7];
AA22 = A[8];
perm0 = 0;
perm1 = 1;
perm2 = 2;
/****************** iteration 0 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA00);
pvt = 0;
t = absOp (AA10);
if (t > p) { p = t; pvt = 1; }
t = absOp (AA20);
if (t > p) { p = t; pvt = 2; }
/* swap pivot row with row 0 */
if (pvt == 1) {
tmp = AA00; AA00 = AA10; AA10 = tmp;
tmp = AA01; AA01 = AA11; AA11 = tmp;
tmp = AA02; AA02 = AA12; AA12 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm1; perm1 = i;
}
if (pvt == 2) {
tmp = AA00; AA00 = AA20; AA20 = tmp;
tmp = AA01; AA01 = AA21; AA21 = tmp;
tmp = AA02; AA02 = AA22; AA22 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm2; perm2 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA00);
icol0 = perm0;
AA00 = tmp;
AA01 = mulOp (tmp, AA01);
AA02 = mulOp (tmp, AA02);
/* eliminate above and below current row */
tmp = AA10;
AA10 = mulOp (negOp(tmp), AA00);
AA11 = fmnaOp (tmp, AA01, AA11);
AA12 = fmnaOp (tmp, AA02, AA12);
tmp = AA20;
AA20 = mulOp (negOp(tmp), AA00);
AA21 = fmnaOp (tmp, AA01, AA21);
AA22 = fmnaOp (tmp, AA02, AA22);
/****************** iteration 1 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA11);
pvt = 1;
t = absOp (AA21);
if (t > p) { p = t; pvt = 2; }
/* swap pivot row with row 1 */
if (pvt == 2) {
tmp = AA10; AA10 = AA20; AA20 = tmp;
tmp = AA11; AA11 = AA21; AA21 = tmp;
tmp = AA12; AA12 = AA22; AA22 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm2; perm2 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA11);
icol1 = perm1;
AA10 = mulOp (tmp, AA10);
AA11 = tmp;
AA12 = mulOp (tmp, AA12);
/* eliminate above and below current row */
tmp = AA01;
AA00 = fmnaOp (tmp, AA10, AA00);
AA01 = mulOp (negOp(tmp), AA11);
AA02 = fmnaOp (tmp, AA12, AA02);
tmp = AA21;
AA20 = fmnaOp (tmp, AA10, AA20);
AA21 = mulOp (negOp(tmp), AA11);
AA22 = fmnaOp (tmp, AA12, AA22);
/****************** iteration 2 ****************/
/* scale current row */
tmp = rcpOp (AA22);
icol2 = perm2;
AA20 = mulOp (tmp, AA20);
AA21 = mulOp (tmp, AA21);
AA22 = tmp;
/* eliminate above and below current row */
tmp = AA02;
AA00 = fmnaOp (tmp, AA20, AA00);
AA01 = fmnaOp (tmp, AA21, AA01);
AA02 = mulOp (negOp(tmp), AA22);
tmp = AA12;
AA10 = fmnaOp (tmp, AA20, AA10);
AA11 = fmnaOp (tmp, AA21, AA11);
AA12 = mulOp (negOp(tmp), AA22);
/* sort columns into the correct order */
Ainv(0,icol0) = AA00;
Ainv(1,icol0) = AA10;
Ainv(2,icol0) = AA20;
Ainv(0,icol1) = AA01;
Ainv(1,icol1) = AA11;
Ainv(2,icol1) = AA21;
Ainv(0,icol2) = AA02;
Ainv(1,icol2) = AA12;
Ainv(2,icol2) = AA22;
}
}
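/*
 * Illustrative host-side check (an assumption, shown for a real-valued T):
 * after copying Ainv back, A*Ainv should be close to the identity for every
 * matrix in the batch. Column-major indexing as defined above; assert/fabs
 * from <assert.h>/<math.h> assumed.
 *
 *   for (int r = 0; r < 3; r++)
 *     for (int c = 0; c < 3; c++) {
 *       double s = 0.0;
 *       for (int k = 0; k < 3; k++)
 *         s += A[k*3+r] * Ainv[c*3+k];            // A(r,k) * Ainv(k,c)
 *       assert (fabs (s - (r == c ? 1.0 : 0.0)) < 1e-10);
 *     }
 */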
template<typename T, int arch>
__global__ void matinv_4x4_matrix_per_thread (const T *A, T *Ainv, int batch)
{
const int blkNum = blockIdx.y * gridDim.x + blockIdx.x;
const int thrdNum = blkNum * blockDim.x + threadIdx.x;
const int N = 4;
int perm0, perm1, perm2, perm3;
int icol0, icol1, icol2, icol3;
T AA00, AA01, AA02, AA03;
T AA10, AA11, AA12, AA13;
T AA20, AA21, AA22, AA23;
T AA30, AA31, AA32, AA33;
T tmp;
#if USE_PIVOTING
typename config<T,arch>::absValType t;
typename config<T,arch>::absValType p;
int i, pvt;
#endif
A += thrdNum * N * N;
Ainv += thrdNum * N * N;
if (thrdNum < batch) {
AA00 = A[0];
AA10 = A[1];
AA20 = A[2];
AA30 = A[3];
AA01 = A[4];
AA11 = A[5];
AA21 = A[6];
AA31 = A[7];
AA02 = A[8];
AA12 = A[9];
AA22 = A[10];
AA32 = A[11];
AA03 = A[12];
AA13 = A[13];
AA23 = A[14];
AA33 = A[15];
perm0 = 0;
perm1 = 1;
perm2 = 2;
perm3 = 3;
/****************** iteration 0 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA00);
pvt = 0;
t = absOp (AA10);
if (t > p) { p = t; pvt = 1; }
t = absOp (AA20);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA30);
if (t > p) { p = t; pvt = 3; }
/* swap pivot row with row 0 */
if (pvt == 1) {
tmp = AA00; AA00 = AA10; AA10 = tmp;
tmp = AA01; AA01 = AA11; AA11 = tmp;
tmp = AA02; AA02 = AA12; AA12 = tmp;
tmp = AA03; AA03 = AA13; AA13 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm1; perm1 = i;
}
if (pvt == 2) {
tmp = AA00; AA00 = AA20; AA20 = tmp;
tmp = AA01; AA01 = AA21; AA21 = tmp;
tmp = AA02; AA02 = AA22; AA22 = tmp;
tmp = AA03; AA03 = AA23; AA23 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA00; AA00 = AA30; AA30 = tmp;
tmp = AA01; AA01 = AA31; AA31 = tmp;
tmp = AA02; AA02 = AA32; AA32 = tmp;
tmp = AA03; AA03 = AA33; AA33 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm3; perm3 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA00);
icol0 = perm0;
AA00 = tmp;
AA01 = mulOp (tmp, AA01);
AA02 = mulOp (tmp, AA02);
AA03 = mulOp (tmp, AA03);
/* eliminate above and below current row */
tmp = AA10;
AA10 = mulOp (negOp(tmp), AA00);
AA11 = fmnaOp (tmp, AA01, AA11);
AA12 = fmnaOp (tmp, AA02, AA12);
AA13 = fmnaOp (tmp, AA03, AA13);
tmp = AA20;
AA20 = mulOp (negOp(tmp), AA00);
AA21 = fmnaOp (tmp, AA01, AA21);
AA22 = fmnaOp (tmp, AA02, AA22);
AA23 = fmnaOp (tmp, AA03, AA23);
tmp = AA30;
AA30 = mulOp (negOp(tmp), AA00);
AA31 = fmnaOp (tmp, AA01, AA31);
AA32 = fmnaOp (tmp, AA02, AA32);
AA33 = fmnaOp (tmp, AA03, AA33);
/****************** iteration 1 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA11);
pvt = 1;
t = absOp (AA21);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA31);
if (t > p) { p = t; pvt = 3; }
/* swap pivot row with row 1 */
if (pvt == 2) {
tmp = AA10; AA10 = AA20; AA20 = tmp;
tmp = AA11; AA11 = AA21; AA21 = tmp;
tmp = AA12; AA12 = AA22; AA22 = tmp;
tmp = AA13; AA13 = AA23; AA23 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA10; AA10 = AA30; AA30 = tmp;
tmp = AA11; AA11 = AA31; AA31 = tmp;
tmp = AA12; AA12 = AA32; AA32 = tmp;
tmp = AA13; AA13 = AA33; AA33 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm3; perm3 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA11);
icol1 = perm1;
AA10 = mulOp (tmp, AA10);
AA11 = tmp;
AA12 = mulOp (tmp, AA12);
AA13 = mulOp (tmp, AA13);
/* eliminate above and below current row */
tmp = AA01;
AA00 = fmnaOp (tmp, AA10, AA00);
AA01 = mulOp (negOp(tmp), AA11);
AA02 = fmnaOp (tmp, AA12, AA02);
AA03 = fmnaOp (tmp, AA13, AA03);
tmp = AA21;
AA20 = fmnaOp (tmp, AA10, AA20);
AA21 = mulOp (negOp(tmp), AA11);
AA22 = fmnaOp (tmp, AA12, AA22);
AA23 = fmnaOp (tmp, AA13, AA23);
tmp = AA31;
AA30 = fmnaOp (tmp, AA10, AA30);
AA31 = mulOp (negOp(tmp), AA11);
AA32 = fmnaOp (tmp, AA12, AA32);
AA33 = fmnaOp (tmp, AA13, AA33);
/****************** iteration 2 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA22);
pvt = 2;
t = absOp (AA32);
if (t > p) { p = t; pvt = 3; }
/* swap pivot row with row 2 */
if (pvt == 3) {
tmp = AA20; AA20 = AA30; AA30 = tmp;
tmp = AA21; AA21 = AA31; AA31 = tmp;
tmp = AA22; AA22 = AA32; AA32 = tmp;
tmp = AA23; AA23 = AA33; AA33 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm3; perm3 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA22);
icol2 = perm2;
AA20 = mulOp (tmp, AA20);
AA21 = mulOp (tmp, AA21);
AA22 = tmp;
AA23 = mulOp (tmp, AA23);
/* eliminate above and below current row */
tmp = AA02;
AA00 = fmnaOp (tmp, AA20, AA00);
AA01 = fmnaOp (tmp, AA21, AA01);
AA02 = mulOp (negOp(tmp), AA22);
AA03 = fmnaOp (tmp, AA23, AA03);
tmp = AA12;
AA10 = fmnaOp (tmp, AA20, AA10);
AA11 = fmnaOp (tmp, AA21, AA11);
AA12 = mulOp (negOp(tmp), AA22);
AA13 = fmnaOp (tmp, AA23, AA13);
tmp = AA32;
AA30 = fmnaOp (tmp, AA20, AA30);
AA31 = fmnaOp (tmp, AA21, AA31);
AA32 = mulOp (negOp(tmp), AA22);
AA33 = fmnaOp (tmp, AA23, AA33);
/****************** iteration 3 ****************/
/* scale current row */
tmp = rcpOp (AA33);
icol3 = perm3;
AA30 = mulOp (tmp, AA30);
AA31 = mulOp (tmp, AA31);
AA32 = mulOp (tmp, AA32);
AA33 = tmp;
/* eliminate above and below current row */
tmp = AA03;
AA00 = fmnaOp (tmp, AA30, AA00);
AA01 = fmnaOp (tmp, AA31, AA01);
AA02 = fmnaOp (tmp, AA32, AA02);
AA03 = mulOp (negOp(tmp), AA33);
tmp = AA13;
AA10 = fmnaOp (tmp, AA30, AA10);
AA11 = fmnaOp (tmp, AA31, AA11);
AA12 = fmnaOp (tmp, AA32, AA12);
AA13 = mulOp (negOp(tmp), AA33);
tmp = AA23;
AA20 = fmnaOp (tmp, AA30, AA20);
AA21 = fmnaOp (tmp, AA31, AA21);
AA22 = fmnaOp (tmp, AA32, AA22);
AA23 = mulOp (negOp(tmp), AA33);
/* sort columns into the correct order */
Ainv(0,icol0) = AA00;
Ainv(1,icol0) = AA10;
Ainv(2,icol0) = AA20;
Ainv(3,icol0) = AA30;
Ainv(0,icol1) = AA01;
Ainv(1,icol1) = AA11;
Ainv(2,icol1) = AA21;
Ainv(3,icol1) = AA31;
Ainv(0,icol2) = AA02;
Ainv(1,icol2) = AA12;
Ainv(2,icol2) = AA22;
Ainv(3,icol2) = AA32;
Ainv(0,icol3) = AA03;
Ainv(1,icol3) = AA13;
Ainv(2,icol3) = AA23;
Ainv(3,icol3) = AA33;
}
}
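/*
 * Why the icol* write-back recovers A^-1 (a short derivation using the usual
 * partial-pivoting argument): the row swaps mean the elimination above really
 * inverts P*A for some permutation matrix P, i.e. the registers end up holding
 * (P*A)^-1 = A^-1 * P^T. Column j of A^-1 * P^T is column perm_j of A^-1, and
 * perm_j is frozen as icol_j at iteration j (later swaps only touch rows > j),
 * so storing register column j into Ainv(:,icol_j) undoes the permutation and
 * leaves A^-1 in Ainv.
 */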
template<typename T, int arch>
__global__ void matinv_5x5_matrix_per_thread (const T *A, T *Ainv, int batch)
{
const int blkNum = blockIdx.y * gridDim.x + blockIdx.x;
const int thrdNum = blkNum * blockDim.x + threadIdx.x;
const int N = 5;
int perm0, perm1, perm2, perm3, perm4;
int icol0, icol1, icol2, icol3, icol4;
T AA00, AA01, AA02, AA03, AA04;
T AA10, AA11, AA12, AA13, AA14;
T AA20, AA21, AA22, AA23, AA24;
T AA30, AA31, AA32, AA33, AA34;
T AA40, AA41, AA42, AA43, AA44;
T tmp;
#if USE_PIVOTING
typename config<T,arch>::absValType t;
typename config<T,arch>::absValType p;
int i, pvt;
#endif
A += thrdNum * N * N;
Ainv += thrdNum * N * N;
if (thrdNum < batch) {
AA00 = A[0];
AA10 = A[1];
AA20 = A[2];
AA30 = A[3];
AA40 = A[4];
AA01 = A[5];
AA11 = A[6];
AA21 = A[7];
AA31 = A[8];
AA41 = A[9];
AA02 = A[10];
AA12 = A[11];
AA22 = A[12];
AA32 = A[13];
AA42 = A[14];
AA03 = A[15];
AA13 = A[16];
AA23 = A[17];
AA33 = A[18];
AA43 = A[19];
AA04 = A[20];
AA14 = A[21];
AA24 = A[22];
AA34 = A[23];
AA44 = A[24];
perm0 = 0;
perm1 = 1;
perm2 = 2;
perm3 = 3;
perm4 = 4;
/****************** iteration 0 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA00);
pvt = 0;
t = absOp (AA10);
if (t > p) { p = t; pvt = 1; }
t = absOp (AA20);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA30);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA40);
if (t > p) { p = t; pvt = 4; }
/* swap pivot row with row 0 */
if (pvt == 1) {
tmp = AA00; AA00 = AA10; AA10 = tmp;
tmp = AA01; AA01 = AA11; AA11 = tmp;
tmp = AA02; AA02 = AA12; AA12 = tmp;
tmp = AA03; AA03 = AA13; AA13 = tmp;
tmp = AA04; AA04 = AA14; AA14 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm1; perm1 = i;
}
if (pvt == 2) {
tmp = AA00; AA00 = AA20; AA20 = tmp;
tmp = AA01; AA01 = AA21; AA21 = tmp;
tmp = AA02; AA02 = AA22; AA22 = tmp;
tmp = AA03; AA03 = AA23; AA23 = tmp;
tmp = AA04; AA04 = AA24; AA24 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA00; AA00 = AA30; AA30 = tmp;
tmp = AA01; AA01 = AA31; AA31 = tmp;
tmp = AA02; AA02 = AA32; AA32 = tmp;
tmp = AA03; AA03 = AA33; AA33 = tmp;
tmp = AA04; AA04 = AA34; AA34 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA00; AA00 = AA40; AA40 = tmp;
tmp = AA01; AA01 = AA41; AA41 = tmp;
tmp = AA02; AA02 = AA42; AA42 = tmp;
tmp = AA03; AA03 = AA43; AA43 = tmp;
tmp = AA04; AA04 = AA44; AA44 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm4; perm4 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA00);
icol0 = perm0;
AA00 = tmp;
AA01 = mulOp (tmp, AA01);
AA02 = mulOp (tmp, AA02);
AA03 = mulOp (tmp, AA03);
AA04 = mulOp (tmp, AA04);
/* eliminate above and below current row */
tmp = AA10;
AA10 = mulOp (negOp(tmp), AA00);
AA11 = fmnaOp (tmp, AA01, AA11);
AA12 = fmnaOp (tmp, AA02, AA12);
AA13 = fmnaOp (tmp, AA03, AA13);
AA14 = fmnaOp (tmp, AA04, AA14);
tmp = AA20;
AA20 = mulOp (negOp(tmp), AA00);
AA21 = fmnaOp (tmp, AA01, AA21);
AA22 = fmnaOp (tmp, AA02, AA22);
AA23 = fmnaOp (tmp, AA03, AA23);
AA24 = fmnaOp (tmp, AA04, AA24);
tmp = AA30;
AA30 = mulOp (negOp(tmp), AA00);
AA31 = fmnaOp (tmp, AA01, AA31);
AA32 = fmnaOp (tmp, AA02, AA32);
AA33 = fmnaOp (tmp, AA03, AA33);
AA34 = fmnaOp (tmp, AA04, AA34);
tmp = AA40;
AA40 = mulOp (negOp(tmp), AA00);
AA41 = fmnaOp (tmp, AA01, AA41);
AA42 = fmnaOp (tmp, AA02, AA42);
AA43 = fmnaOp (tmp, AA03, AA43);
AA44 = fmnaOp (tmp, AA04, AA44);
/****************** iteration 1 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA11);
pvt = 1;
t = absOp (AA21);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA31);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA41);
if (t > p) { p = t; pvt = 4; }
/* swap pivot row with row 1 */
if (pvt == 2) {
tmp = AA10; AA10 = AA20; AA20 = tmp;
tmp = AA11; AA11 = AA21; AA21 = tmp;
tmp = AA12; AA12 = AA22; AA22 = tmp;
tmp = AA13; AA13 = AA23; AA23 = tmp;
tmp = AA14; AA14 = AA24; AA24 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA10; AA10 = AA30; AA30 = tmp;
tmp = AA11; AA11 = AA31; AA31 = tmp;
tmp = AA12; AA12 = AA32; AA32 = tmp;
tmp = AA13; AA13 = AA33; AA33 = tmp;
tmp = AA14; AA14 = AA34; AA34 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA10; AA10 = AA40; AA40 = tmp;
tmp = AA11; AA11 = AA41; AA41 = tmp;
tmp = AA12; AA12 = AA42; AA42 = tmp;
tmp = AA13; AA13 = AA43; AA43 = tmp;
tmp = AA14; AA14 = AA44; AA44 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm4; perm4 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA11);
icol1 = perm1;
AA10 = mulOp (tmp, AA10);
AA11 = tmp;
AA12 = mulOp (tmp, AA12);
AA13 = mulOp (tmp, AA13);
AA14 = mulOp (tmp, AA14);
/* eliminate above and below current row */
tmp = AA01;
AA00 = fmnaOp (tmp, AA10, AA00);
AA01 = mulOp (negOp(tmp), AA11);
AA02 = fmnaOp (tmp, AA12, AA02);
AA03 = fmnaOp (tmp, AA13, AA03);
AA04 = fmnaOp (tmp, AA14, AA04);
tmp = AA21;
AA20 = fmnaOp (tmp, AA10, AA20);
AA21 = mulOp (negOp(tmp), AA11);
AA22 = fmnaOp (tmp, AA12, AA22);
AA23 = fmnaOp (tmp, AA13, AA23);
AA24 = fmnaOp (tmp, AA14, AA24);
tmp = AA31;
AA30 = fmnaOp (tmp, AA10, AA30);
AA31 = mulOp (negOp(tmp), AA11);
AA32 = fmnaOp (tmp, AA12, AA32);
AA33 = fmnaOp (tmp, AA13, AA33);
AA34 = fmnaOp (tmp, AA14, AA34);
tmp = AA41;
AA40 = fmnaOp (tmp, AA10, AA40);
AA41 = mulOp (negOp(tmp), AA11);
AA42 = fmnaOp (tmp, AA12, AA42);
AA43 = fmnaOp (tmp, AA13, AA43);
AA44 = fmnaOp (tmp, AA14, AA44);
/****************** iteration 2 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA22);
pvt = 2;
t = absOp (AA32);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA42);
if (t > p) { p = t; pvt = 4; }
/* swap pivot row with row 2 */
if (pvt == 3) {
tmp = AA20; AA20 = AA30; AA30 = tmp;
tmp = AA21; AA21 = AA31; AA31 = tmp;
tmp = AA22; AA22 = AA32; AA32 = tmp;
tmp = AA23; AA23 = AA33; AA33 = tmp;
tmp = AA24; AA24 = AA34; AA34 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA20; AA20 = AA40; AA40 = tmp;
tmp = AA21; AA21 = AA41; AA41 = tmp;
tmp = AA22; AA22 = AA42; AA42 = tmp;
tmp = AA23; AA23 = AA43; AA43 = tmp;
tmp = AA24; AA24 = AA44; AA44 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm4; perm4 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA22);
icol2 = perm2;
AA20 = mulOp (tmp, AA20);
AA21 = mulOp (tmp, AA21);
AA22 = tmp;
AA23 = mulOp (tmp, AA23);
AA24 = mulOp (tmp, AA24);
/* eliminate above and below current row */
tmp = AA02;
AA00 = fmnaOp (tmp, AA20, AA00);
AA01 = fmnaOp (tmp, AA21, AA01);
AA02 = mulOp (negOp(tmp), AA22);
AA03 = fmnaOp (tmp, AA23, AA03);
AA04 = fmnaOp (tmp, AA24, AA04);
tmp = AA12;
AA10 = fmnaOp (tmp, AA20, AA10);
AA11 = fmnaOp (tmp, AA21, AA11);
AA12 = mulOp (negOp(tmp), AA22);
AA13 = fmnaOp (tmp, AA23, AA13);
AA14 = fmnaOp (tmp, AA24, AA14);
tmp = AA32;
AA30 = fmnaOp (tmp, AA20, AA30);
AA31 = fmnaOp (tmp, AA21, AA31);
AA32 = mulOp (negOp(tmp), AA22);
AA33 = fmnaOp (tmp, AA23, AA33);
AA34 = fmnaOp (tmp, AA24, AA34);
tmp = AA42;
AA40 = fmnaOp (tmp, AA20, AA40);
AA41 = fmnaOp (tmp, AA21, AA41);
AA42 = mulOp (negOp(tmp), AA22);
AA43 = fmnaOp (tmp, AA23, AA43);
AA44 = fmnaOp (tmp, AA24, AA44);
/****************** iteration 3 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA33);
pvt = 3;
t = absOp (AA43);
if (t > p) { p = t; pvt = 4; }
/* swap pivot row with row 3 */
if (pvt == 4) {
tmp = AA30; AA30 = AA40; AA40 = tmp;
tmp = AA31; AA31 = AA41; AA41 = tmp;
tmp = AA32; AA32 = AA42; AA42 = tmp;
tmp = AA33; AA33 = AA43; AA43 = tmp;
tmp = AA34; AA34 = AA44; AA44 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm4; perm4 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA33);
icol3 = perm3;
AA30 = mulOp (tmp, AA30);
AA31 = mulOp (tmp, AA31);
AA32 = mulOp (tmp, AA32);
AA33 = tmp;
AA34 = mulOp (tmp, AA34);
/* eliminate above and below current row */
tmp = AA03;
AA00 = fmnaOp (tmp, AA30, AA00);
AA01 = fmnaOp (tmp, AA31, AA01);
AA02 = fmnaOp (tmp, AA32, AA02);
AA03 = mulOp (negOp(tmp), AA33);
AA04 = fmnaOp (tmp, AA34, AA04);
tmp = AA13;
AA10 = fmnaOp (tmp, AA30, AA10);
AA11 = fmnaOp (tmp, AA31, AA11);
AA12 = fmnaOp (tmp, AA32, AA12);
AA13 = mulOp (negOp(tmp), AA33);
AA14 = fmnaOp (tmp, AA34, AA14);
tmp = AA23;
AA20 = fmnaOp (tmp, AA30, AA20);
AA21 = fmnaOp (tmp, AA31, AA21);
AA22 = fmnaOp (tmp, AA32, AA22);
AA23 = mulOp (negOp(tmp), AA33);
AA24 = fmnaOp (tmp, AA34, AA24);
tmp = AA43;
AA40 = fmnaOp (tmp, AA30, AA40);
AA41 = fmnaOp (tmp, AA31, AA41);
AA42 = fmnaOp (tmp, AA32, AA42);
AA43 = mulOp (negOp(tmp), AA33);
AA44 = fmnaOp (tmp, AA34, AA44);
/****************** iteration 4 ****************/
/* scale current row */
tmp = rcpOp (AA44);
icol4 = perm4;
AA40 = mulOp (tmp, AA40);
AA41 = mulOp (tmp, AA41);
AA42 = mulOp (tmp, AA42);
AA43 = mulOp (tmp, AA43);
AA44 = tmp;
/* eliminate above and below current row */
tmp = AA04;
AA00 = fmnaOp (tmp, AA40, AA00);
AA01 = fmnaOp (tmp, AA41, AA01);
AA02 = fmnaOp (tmp, AA42, AA02);
AA03 = fmnaOp (tmp, AA43, AA03);
AA04 = mulOp (negOp(tmp), AA44);
tmp = AA14;
AA10 = fmnaOp (tmp, AA40, AA10);
AA11 = fmnaOp (tmp, AA41, AA11);
AA12 = fmnaOp (tmp, AA42, AA12);
AA13 = fmnaOp (tmp, AA43, AA13);
AA14 = mulOp (negOp(tmp), AA44);
tmp = AA24;
AA20 = fmnaOp (tmp, AA40, AA20);
AA21 = fmnaOp (tmp, AA41, AA21);
AA22 = fmnaOp (tmp, AA42, AA22);
AA23 = fmnaOp (tmp, AA43, AA23);
AA24 = mulOp (negOp(tmp), AA44);
tmp = AA34;
AA30 = fmnaOp (tmp, AA40, AA30);
AA31 = fmnaOp (tmp, AA41, AA31);
AA32 = fmnaOp (tmp, AA42, AA32);
AA33 = fmnaOp (tmp, AA43, AA33);
AA34 = mulOp (negOp(tmp), AA44);
/* sort columns into the correct order */
Ainv(0,icol0) = AA00;
Ainv(1,icol0) = AA10;
Ainv(2,icol0) = AA20;
Ainv(3,icol0) = AA30;
Ainv(4,icol0) = AA40;
Ainv(0,icol1) = AA01;
Ainv(1,icol1) = AA11;
Ainv(2,icol1) = AA21;
Ainv(3,icol1) = AA31;
Ainv(4,icol1) = AA41;
Ainv(0,icol2) = AA02;
Ainv(1,icol2) = AA12;
Ainv(2,icol2) = AA22;
Ainv(3,icol2) = AA32;
Ainv(4,icol2) = AA42;
Ainv(0,icol3) = AA03;
Ainv(1,icol3) = AA13;
Ainv(2,icol3) = AA23;
Ainv(3,icol3) = AA33;
Ainv(4,icol3) = AA43;
Ainv(0,icol4) = AA04;
Ainv(1,icol4) = AA14;
Ainv(2,icol4) = AA24;
Ainv(3,icol4) = AA34;
Ainv(4,icol4) = AA44;
}
}
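/*
 * Design note (interpretation): each matinv_NxN kernel keeps the full N*N
 * matrix in registers (25 values per thread here), so register pressure grows
 * quadratically with N. That is presumably why the matInvNxNMinBatch
 * thresholds above rise again for larger N and why matInvMaxDim caps the
 * matrix-per-thread path, leaving larger dimensions to the shared-memory gje3
 * kernels.
 */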
template<typename T, int arch>
__global__ void matinv_6x6_matrix_per_thread (const T *A, T *Ainv, int batch)
{
const int blkNum = blockIdx.y * gridDim.x + blockIdx.x;
const int thrdNum = blkNum * blockDim.x + threadIdx.x;
const int N = 6;
int perm0, perm1, perm2, perm3, perm4, perm5;
int icol0, icol1, icol2, icol3, icol4, icol5;
T AA00, AA01, AA02, AA03, AA04, AA05;
T AA10, AA11, AA12, AA13, AA14, AA15;
T AA20, AA21, AA22, AA23, AA24, AA25;
T AA30, AA31, AA32, AA33, AA34, AA35;
T AA40, AA41, AA42, AA43, AA44, AA45;
T AA50, AA51, AA52, AA53, AA54, AA55;
T tmp;
#if USE_PIVOTING
typename config<T,arch>::absValType t;
typename config<T,arch>::absValType p;
int i, pvt;
#endif
A += thrdNum * N * N;
Ainv += thrdNum * N * N;
if (thrdNum < batch) {
AA00 = A[0];
AA10 = A[1];
AA20 = A[2];
AA30 = A[3];
AA40 = A[4];
AA50 = A[5];
AA01 = A[6];
AA11 = A[7];
AA21 = A[8];
AA31 = A[9];
AA41 = A[10];
AA51 = A[11];
AA02 = A[12];
AA12 = A[13];
AA22 = A[14];
AA32 = A[15];
AA42 = A[16];
AA52 = A[17];
AA03 = A[18];
AA13 = A[19];
AA23 = A[20];
AA33 = A[21];
AA43 = A[22];
AA53 = A[23];
AA04 = A[24];
AA14 = A[25];
AA24 = A[26];
AA34 = A[27];
AA44 = A[28];
AA54 = A[29];
AA05 = A[30];
AA15 = A[31];
AA25 = A[32];
AA35 = A[33];
AA45 = A[34];
AA55 = A[35];
perm0 = 0;
perm1 = 1;
perm2 = 2;
perm3 = 3;
perm4 = 4;
perm5 = 5;
/****************** iteration 0 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA00);
pvt = 0;
t = absOp (AA10);
if (t > p) { p = t; pvt = 1; }
t = absOp (AA20);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA30);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA40);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA50);
if (t > p) { p = t; pvt = 5; }
/* swap pivot row with row 0 */
if (pvt == 1) {
tmp = AA00; AA00 = AA10; AA10 = tmp;
tmp = AA01; AA01 = AA11; AA11 = tmp;
tmp = AA02; AA02 = AA12; AA12 = tmp;
tmp = AA03; AA03 = AA13; AA13 = tmp;
tmp = AA04; AA04 = AA14; AA14 = tmp;
tmp = AA05; AA05 = AA15; AA15 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm1; perm1 = i;
}
if (pvt == 2) {
tmp = AA00; AA00 = AA20; AA20 = tmp;
tmp = AA01; AA01 = AA21; AA21 = tmp;
tmp = AA02; AA02 = AA22; AA22 = tmp;
tmp = AA03; AA03 = AA23; AA23 = tmp;
tmp = AA04; AA04 = AA24; AA24 = tmp;
tmp = AA05; AA05 = AA25; AA25 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA00; AA00 = AA30; AA30 = tmp;
tmp = AA01; AA01 = AA31; AA31 = tmp;
tmp = AA02; AA02 = AA32; AA32 = tmp;
tmp = AA03; AA03 = AA33; AA33 = tmp;
tmp = AA04; AA04 = AA34; AA34 = tmp;
tmp = AA05; AA05 = AA35; AA35 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA00; AA00 = AA40; AA40 = tmp;
tmp = AA01; AA01 = AA41; AA41 = tmp;
tmp = AA02; AA02 = AA42; AA42 = tmp;
tmp = AA03; AA03 = AA43; AA43 = tmp;
tmp = AA04; AA04 = AA44; AA44 = tmp;
tmp = AA05; AA05 = AA45; AA45 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA00; AA00 = AA50; AA50 = tmp;
tmp = AA01; AA01 = AA51; AA51 = tmp;
tmp = AA02; AA02 = AA52; AA52 = tmp;
tmp = AA03; AA03 = AA53; AA53 = tmp;
tmp = AA04; AA04 = AA54; AA54 = tmp;
tmp = AA05; AA05 = AA55; AA55 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm5; perm5 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA00);
icol0 = perm0;
AA00 = tmp;
AA01 = mulOp (tmp, AA01);
AA02 = mulOp (tmp, AA02);
AA03 = mulOp (tmp, AA03);
AA04 = mulOp (tmp, AA04);
AA05 = mulOp (tmp, AA05);
/* eliminate above and below current row */
tmp = AA10;
AA10 = mulOp (negOp(tmp), AA00);
AA11 = fmnaOp (tmp, AA01, AA11);
AA12 = fmnaOp (tmp, AA02, AA12);
AA13 = fmnaOp (tmp, AA03, AA13);
AA14 = fmnaOp (tmp, AA04, AA14);
AA15 = fmnaOp (tmp, AA05, AA15);
tmp = AA20;
AA20 = mulOp (negOp(tmp), AA00);
AA21 = fmnaOp (tmp, AA01, AA21);
AA22 = fmnaOp (tmp, AA02, AA22);
AA23 = fmnaOp (tmp, AA03, AA23);
AA24 = fmnaOp (tmp, AA04, AA24);
AA25 = fmnaOp (tmp, AA05, AA25);
tmp = AA30;
AA30 = mulOp (negOp(tmp), AA00);
AA31 = fmnaOp (tmp, AA01, AA31);
AA32 = fmnaOp (tmp, AA02, AA32);
AA33 = fmnaOp (tmp, AA03, AA33);
AA34 = fmnaOp (tmp, AA04, AA34);
AA35 = fmnaOp (tmp, AA05, AA35);
tmp = AA40;
AA40 = mulOp (negOp(tmp), AA00);
AA41 = fmnaOp (tmp, AA01, AA41);
AA42 = fmnaOp (tmp, AA02, AA42);
AA43 = fmnaOp (tmp, AA03, AA43);
AA44 = fmnaOp (tmp, AA04, AA44);
AA45 = fmnaOp (tmp, AA05, AA45);
tmp = AA50;
AA50 = mulOp (negOp(tmp), AA00);
AA51 = fmnaOp (tmp, AA01, AA51);
AA52 = fmnaOp (tmp, AA02, AA52);
AA53 = fmnaOp (tmp, AA03, AA53);
AA54 = fmnaOp (tmp, AA04, AA54);
AA55 = fmnaOp (tmp, AA05, AA55);
/****************** iteration 1 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA11);
pvt = 1;
t = absOp (AA21);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA31);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA41);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA51);
if (t > p) { p = t; pvt = 5; }
/* swap pivot row with row 1 */
if (pvt == 2) {
tmp = AA10; AA10 = AA20; AA20 = tmp;
tmp = AA11; AA11 = AA21; AA21 = tmp;
tmp = AA12; AA12 = AA22; AA22 = tmp;
tmp = AA13; AA13 = AA23; AA23 = tmp;
tmp = AA14; AA14 = AA24; AA24 = tmp;
tmp = AA15; AA15 = AA25; AA25 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA10; AA10 = AA30; AA30 = tmp;
tmp = AA11; AA11 = AA31; AA31 = tmp;
tmp = AA12; AA12 = AA32; AA32 = tmp;
tmp = AA13; AA13 = AA33; AA33 = tmp;
tmp = AA14; AA14 = AA34; AA34 = tmp;
tmp = AA15; AA15 = AA35; AA35 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA10; AA10 = AA40; AA40 = tmp;
tmp = AA11; AA11 = AA41; AA41 = tmp;
tmp = AA12; AA12 = AA42; AA42 = tmp;
tmp = AA13; AA13 = AA43; AA43 = tmp;
tmp = AA14; AA14 = AA44; AA44 = tmp;
tmp = AA15; AA15 = AA45; AA45 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA10; AA10 = AA50; AA50 = tmp;
tmp = AA11; AA11 = AA51; AA51 = tmp;
tmp = AA12; AA12 = AA52; AA52 = tmp;
tmp = AA13; AA13 = AA53; AA53 = tmp;
tmp = AA14; AA14 = AA54; AA54 = tmp;
tmp = AA15; AA15 = AA55; AA55 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm5; perm5 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA11);
icol1 = perm1;
AA10 = mulOp (tmp, AA10);
AA11 = tmp;
AA12 = mulOp (tmp, AA12);
AA13 = mulOp (tmp, AA13);
AA14 = mulOp (tmp, AA14);
AA15 = mulOp (tmp, AA15);
/* eliminate above and below current row */
tmp = AA01;
AA00 = fmnaOp (tmp, AA10, AA00);
AA01 = mulOp (negOp(tmp), AA11);
AA02 = fmnaOp (tmp, AA12, AA02);
AA03 = fmnaOp (tmp, AA13, AA03);
AA04 = fmnaOp (tmp, AA14, AA04);
AA05 = fmnaOp (tmp, AA15, AA05);
tmp = AA21;
AA20 = fmnaOp (tmp, AA10, AA20);
AA21 = mulOp (negOp(tmp), AA11);
AA22 = fmnaOp (tmp, AA12, AA22);
AA23 = fmnaOp (tmp, AA13, AA23);
AA24 = fmnaOp (tmp, AA14, AA24);
AA25 = fmnaOp (tmp, AA15, AA25);
tmp = AA31;
AA30 = fmnaOp (tmp, AA10, AA30);
AA31 = mulOp (negOp(tmp), AA11);
AA32 = fmnaOp (tmp, AA12, AA32);
AA33 = fmnaOp (tmp, AA13, AA33);
AA34 = fmnaOp (tmp, AA14, AA34);
AA35 = fmnaOp (tmp, AA15, AA35);
tmp = AA41;
AA40 = fmnaOp (tmp, AA10, AA40);
AA41 = mulOp (negOp(tmp), AA11);
AA42 = fmnaOp (tmp, AA12, AA42);
AA43 = fmnaOp (tmp, AA13, AA43);
AA44 = fmnaOp (tmp, AA14, AA44);
AA45 = fmnaOp (tmp, AA15, AA45);
tmp = AA51;
AA50 = fmnaOp (tmp, AA10, AA50);
AA51 = mulOp (negOp(tmp), AA11);
AA52 = fmnaOp (tmp, AA12, AA52);
AA53 = fmnaOp (tmp, AA13, AA53);
AA54 = fmnaOp (tmp, AA14, AA54);
AA55 = fmnaOp (tmp, AA15, AA55);
/****************** iteration 2 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA22);
pvt = 2;
t = absOp (AA32);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA42);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA52);
if (t > p) { p = t; pvt = 5; }
/* swap pivot row with row 2 */
if (pvt == 3) {
tmp = AA20; AA20 = AA30; AA30 = tmp;
tmp = AA21; AA21 = AA31; AA31 = tmp;
tmp = AA22; AA22 = AA32; AA32 = tmp;
tmp = AA23; AA23 = AA33; AA33 = tmp;
tmp = AA24; AA24 = AA34; AA34 = tmp;
tmp = AA25; AA25 = AA35; AA35 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA20; AA20 = AA40; AA40 = tmp;
tmp = AA21; AA21 = AA41; AA41 = tmp;
tmp = AA22; AA22 = AA42; AA42 = tmp;
tmp = AA23; AA23 = AA43; AA43 = tmp;
tmp = AA24; AA24 = AA44; AA44 = tmp;
tmp = AA25; AA25 = AA45; AA45 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA20; AA20 = AA50; AA50 = tmp;
tmp = AA21; AA21 = AA51; AA51 = tmp;
tmp = AA22; AA22 = AA52; AA52 = tmp;
tmp = AA23; AA23 = AA53; AA53 = tmp;
tmp = AA24; AA24 = AA54; AA54 = tmp;
tmp = AA25; AA25 = AA55; AA55 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm5; perm5 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA22);
icol2 = perm2;
AA20 = mulOp (tmp, AA20);
AA21 = mulOp (tmp, AA21);
AA22 = tmp;
AA23 = mulOp (tmp, AA23);
AA24 = mulOp (tmp, AA24);
AA25 = mulOp (tmp, AA25);
/* eliminate above and below current row */
tmp = AA02;
AA00 = fmnaOp (tmp, AA20, AA00);
AA01 = fmnaOp (tmp, AA21, AA01);
AA02 = mulOp (negOp(tmp), AA22);
AA03 = fmnaOp (tmp, AA23, AA03);
AA04 = fmnaOp (tmp, AA24, AA04);
AA05 = fmnaOp (tmp, AA25, AA05);
tmp = AA12;
AA10 = fmnaOp (tmp, AA20, AA10);
AA11 = fmnaOp (tmp, AA21, AA11);
AA12 = mulOp (negOp(tmp), AA22);
AA13 = fmnaOp (tmp, AA23, AA13);
AA14 = fmnaOp (tmp, AA24, AA14);
AA15 = fmnaOp (tmp, AA25, AA15);
tmp = AA32;
AA30 = fmnaOp (tmp, AA20, AA30);
AA31 = fmnaOp (tmp, AA21, AA31);
AA32 = mulOp (negOp(tmp), AA22);
AA33 = fmnaOp (tmp, AA23, AA33);
AA34 = fmnaOp (tmp, AA24, AA34);
AA35 = fmnaOp (tmp, AA25, AA35);
tmp = AA42;
AA40 = fmnaOp (tmp, AA20, AA40);
AA41 = fmnaOp (tmp, AA21, AA41);
AA42 = mulOp (negOp(tmp), AA22);
AA43 = fmnaOp (tmp, AA23, AA43);
AA44 = fmnaOp (tmp, AA24, AA44);
AA45 = fmnaOp (tmp, AA25, AA45);
tmp = AA52;
AA50 = fmnaOp (tmp, AA20, AA50);
AA51 = fmnaOp (tmp, AA21, AA51);
AA52 = mulOp (negOp(tmp), AA22);
AA53 = fmnaOp (tmp, AA23, AA53);
AA54 = fmnaOp (tmp, AA24, AA54);
AA55 = fmnaOp (tmp, AA25, AA55);
/****************** iteration 3 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA33);
pvt = 3;
t = absOp (AA43);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA53);
if (t > p) { p = t; pvt = 5; }
/* swap pivot row with row 3 */
if (pvt == 4) {
tmp = AA30; AA30 = AA40; AA40 = tmp;
tmp = AA31; AA31 = AA41; AA41 = tmp;
tmp = AA32; AA32 = AA42; AA42 = tmp;
tmp = AA33; AA33 = AA43; AA43 = tmp;
tmp = AA34; AA34 = AA44; AA44 = tmp;
tmp = AA35; AA35 = AA45; AA45 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA30; AA30 = AA50; AA50 = tmp;
tmp = AA31; AA31 = AA51; AA51 = tmp;
tmp = AA32; AA32 = AA52; AA52 = tmp;
tmp = AA33; AA33 = AA53; AA53 = tmp;
tmp = AA34; AA34 = AA54; AA54 = tmp;
tmp = AA35; AA35 = AA55; AA55 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm5; perm5 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA33);
icol3 = perm3;
AA30 = mulOp (tmp, AA30);
AA31 = mulOp (tmp, AA31);
AA32 = mulOp (tmp, AA32);
AA33 = tmp;
AA34 = mulOp (tmp, AA34);
AA35 = mulOp (tmp, AA35);
/* eliminate above and below current row */
tmp = AA03;
AA00 = fmnaOp (tmp, AA30, AA00);
AA01 = fmnaOp (tmp, AA31, AA01);
AA02 = fmnaOp (tmp, AA32, AA02);
AA03 = mulOp (negOp(tmp), AA33);
AA04 = fmnaOp (tmp, AA34, AA04);
AA05 = fmnaOp (tmp, AA35, AA05);
tmp = AA13;
AA10 = fmnaOp (tmp, AA30, AA10);
AA11 = fmnaOp (tmp, AA31, AA11);
AA12 = fmnaOp (tmp, AA32, AA12);
AA13 = mulOp (negOp(tmp), AA33);
AA14 = fmnaOp (tmp, AA34, AA14);
AA15 = fmnaOp (tmp, AA35, AA15);
tmp = AA23;
AA20 = fmnaOp (tmp, AA30, AA20);
AA21 = fmnaOp (tmp, AA31, AA21);
AA22 = fmnaOp (tmp, AA32, AA22);
AA23 = mulOp (negOp(tmp), AA33);
AA24 = fmnaOp (tmp, AA34, AA24);
AA25 = fmnaOp (tmp, AA35, AA25);
tmp = AA43;
AA40 = fmnaOp (tmp, AA30, AA40);
AA41 = fmnaOp (tmp, AA31, AA41);
AA42 = fmnaOp (tmp, AA32, AA42);
AA43 = mulOp (negOp(tmp), AA33);
AA44 = fmnaOp (tmp, AA34, AA44);
AA45 = fmnaOp (tmp, AA35, AA45);
tmp = AA53;
AA50 = fmnaOp (tmp, AA30, AA50);
AA51 = fmnaOp (tmp, AA31, AA51);
AA52 = fmnaOp (tmp, AA32, AA52);
AA53 = mulOp (negOp(tmp), AA33);
AA54 = fmnaOp (tmp, AA34, AA54);
AA55 = fmnaOp (tmp, AA35, AA55);
/****************** iteration 4 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA44);
pvt = 4;
t = absOp (AA54);
if (t > p) { p = t; pvt = 5; }
/* swap pivot row with row 4 */
if (pvt == 5) {
tmp = AA40; AA40 = AA50; AA50 = tmp;
tmp = AA41; AA41 = AA51; AA51 = tmp;
tmp = AA42; AA42 = AA52; AA52 = tmp;
tmp = AA43; AA43 = AA53; AA53 = tmp;
tmp = AA44; AA44 = AA54; AA54 = tmp;
tmp = AA45; AA45 = AA55; AA55 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm5; perm5 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA44);
icol4 = perm4;
AA40 = mulOp (tmp, AA40);
AA41 = mulOp (tmp, AA41);
AA42 = mulOp (tmp, AA42);
AA43 = mulOp (tmp, AA43);
AA44 = tmp;
AA45 = mulOp (tmp, AA45);
/* eliminate above and below current row */
tmp = AA04;
AA00 = fmnaOp (tmp, AA40, AA00);
AA01 = fmnaOp (tmp, AA41, AA01);
AA02 = fmnaOp (tmp, AA42, AA02);
AA03 = fmnaOp (tmp, AA43, AA03);
AA04 = mulOp (negOp(tmp), AA44);
AA05 = fmnaOp (tmp, AA45, AA05);
tmp = AA14;
AA10 = fmnaOp (tmp, AA40, AA10);
AA11 = fmnaOp (tmp, AA41, AA11);
AA12 = fmnaOp (tmp, AA42, AA12);
AA13 = fmnaOp (tmp, AA43, AA13);
AA14 = mulOp (negOp(tmp), AA44);
AA15 = fmnaOp (tmp, AA45, AA15);
tmp = AA24;
AA20 = fmnaOp (tmp, AA40, AA20);
AA21 = fmnaOp (tmp, AA41, AA21);
AA22 = fmnaOp (tmp, AA42, AA22);
AA23 = fmnaOp (tmp, AA43, AA23);
AA24 = mulOp (negOp(tmp), AA44);
AA25 = fmnaOp (tmp, AA45, AA25);
tmp = AA34;
AA30 = fmnaOp (tmp, AA40, AA30);
AA31 = fmnaOp (tmp, AA41, AA31);
AA32 = fmnaOp (tmp, AA42, AA32);
AA33 = fmnaOp (tmp, AA43, AA33);
AA34 = mulOp (negOp(tmp), AA44);
AA35 = fmnaOp (tmp, AA45, AA35);
tmp = AA54;
AA50 = fmnaOp (tmp, AA40, AA50);
AA51 = fmnaOp (tmp, AA41, AA51);
AA52 = fmnaOp (tmp, AA42, AA52);
AA53 = fmnaOp (tmp, AA43, AA53);
AA54 = mulOp (negOp(tmp), AA44);
AA55 = fmnaOp (tmp, AA45, AA55);
/****************** iteration 5 ****************/
/* scale current row */
tmp = rcpOp (AA55);
icol5 = perm5;
AA50 = mulOp (tmp, AA50);
AA51 = mulOp (tmp, AA51);
AA52 = mulOp (tmp, AA52);
AA53 = mulOp (tmp, AA53);
AA54 = mulOp (tmp, AA54);
AA55 = tmp;
/* eliminate above and below current row */
tmp = AA05;
AA00 = fmnaOp (tmp, AA50, AA00);
AA01 = fmnaOp (tmp, AA51, AA01);
AA02 = fmnaOp (tmp, AA52, AA02);
AA03 = fmnaOp (tmp, AA53, AA03);
AA04 = fmnaOp (tmp, AA54, AA04);
AA05 = mulOp (negOp(tmp), AA55);
tmp = AA15;
AA10 = fmnaOp (tmp, AA50, AA10);
AA11 = fmnaOp (tmp, AA51, AA11);
AA12 = fmnaOp (tmp, AA52, AA12);
AA13 = fmnaOp (tmp, AA53, AA13);
AA14 = fmnaOp (tmp, AA54, AA14);
AA15 = mulOp (negOp(tmp), AA55);
tmp = AA25;
AA20 = fmnaOp (tmp, AA50, AA20);
AA21 = fmnaOp (tmp, AA51, AA21);
AA22 = fmnaOp (tmp, AA52, AA22);
AA23 = fmnaOp (tmp, AA53, AA23);
AA24 = fmnaOp (tmp, AA54, AA24);
AA25 = mulOp (negOp(tmp), AA55);
tmp = AA35;
AA30 = fmnaOp (tmp, AA50, AA30);
AA31 = fmnaOp (tmp, AA51, AA31);
AA32 = fmnaOp (tmp, AA52, AA32);
AA33 = fmnaOp (tmp, AA53, AA33);
AA34 = fmnaOp (tmp, AA54, AA34);
AA35 = mulOp (negOp(tmp), AA55);
tmp = AA45;
AA40 = fmnaOp (tmp, AA50, AA40);
AA41 = fmnaOp (tmp, AA51, AA41);
AA42 = fmnaOp (tmp, AA52, AA42);
AA43 = fmnaOp (tmp, AA53, AA43);
AA44 = fmnaOp (tmp, AA54, AA44);
AA45 = mulOp (negOp(tmp), AA55);
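/* row swaps applied to A during pivoting correspond to column swaps of the inverse,
   so column k of the register matrix is written back to column icol0..icol5 of Ainv;
   Ainv(row,col) is an indexing macro assumed to be defined elsewhere in this file */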
/* sort columns into the correct order */
Ainv(0,icol0) = AA00;
Ainv(1,icol0) = AA10;
Ainv(2,icol0) = AA20;
Ainv(3,icol0) = AA30;
Ainv(4,icol0) = AA40;
Ainv(5,icol0) = AA50;
Ainv(0,icol1) = AA01;
Ainv(1,icol1) = AA11;
Ainv(2,icol1) = AA21;
Ainv(3,icol1) = AA31;
Ainv(4,icol1) = AA41;
Ainv(5,icol1) = AA51;
Ainv(0,icol2) = AA02;
Ainv(1,icol2) = AA12;
Ainv(2,icol2) = AA22;
Ainv(3,icol2) = AA32;
Ainv(4,icol2) = AA42;
Ainv(5,icol2) = AA52;
Ainv(0,icol3) = AA03;
Ainv(1,icol3) = AA13;
Ainv(2,icol3) = AA23;
Ainv(3,icol3) = AA33;
Ainv(4,icol3) = AA43;
Ainv(5,icol3) = AA53;
Ainv(0,icol4) = AA04;
Ainv(1,icol4) = AA14;
Ainv(2,icol4) = AA24;
Ainv(3,icol4) = AA34;
Ainv(4,icol4) = AA44;
Ainv(5,icol4) = AA54;
Ainv(0,icol5) = AA05;
Ainv(1,icol5) = AA15;
Ainv(2,icol5) = AA25;
Ainv(3,icol5) = AA35;
Ainv(4,icol5) = AA45;
Ainv(5,icol5) = AA55;
}
}
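/*
 * matinv_7x7_matrix_per_thread: invert a batch of 7x7 matrices, one matrix per
 * thread. Each matrix is held entirely in registers (AA00..AA66) and reduced by
 * fully unrolled Gauss-Jordan elimination, with optional partial pivoting when
 * USE_PIVOTING is enabled. A and Ainv are expected to hold `batch` contiguous
 * column-major 7x7 matrices. The helpers rcpOp, mulOp, fmnaOp, negOp, absOp and
 * the config<T,arch> traits are assumed to be defined elsewhere in this file.
 */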
template<typename T, int arch>
__global__ void matinv_7x7_matrix_per_thread (const T *A, T *Ainv, int batch)
{
const int blkNum = blockIdx.y * gridDim.x + blockIdx.x;
const int thrdNum = blkNum * blockDim.x + threadIdx.x;
const int N = 7;
int perm0, perm1, perm2, perm3, perm4, perm5, perm6;
int icol0, icol1, icol2, icol3, icol4, icol5, icol6;
T AA00, AA01, AA02, AA03, AA04, AA05, AA06;
T AA10, AA11, AA12, AA13, AA14, AA15, AA16;
T AA20, AA21, AA22, AA23, AA24, AA25, AA26;
T AA30, AA31, AA32, AA33, AA34, AA35, AA36;
T AA40, AA41, AA42, AA43, AA44, AA45, AA46;
T AA50, AA51, AA52, AA53, AA54, AA55, AA56;
T AA60, AA61, AA62, AA63, AA64, AA65, AA66;
T tmp;
#if USE_PIVOTING
typename config<T,arch>::absValType t;
typename config<T,arch>::absValType p;
int i, pvt;
#endif
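/* advance both pointers to this thread's own 7x7 matrix within the batch;
   threads with thrdNum >= batch fall through and do no work */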
A += thrdNum * N * N;
Ainv += thrdNum * N * N;
if (thrdNum < batch) {
AA00 = A[0];
AA10 = A[1];
AA20 = A[2];
AA30 = A[3];
AA40 = A[4];
AA50 = A[5];
AA60 = A[6];
AA01 = A[7];
AA11 = A[8];
AA21 = A[9];
AA31 = A[10];
AA41 = A[11];
AA51 = A[12];
AA61 = A[13];
AA02 = A[14];
AA12 = A[15];
AA22 = A[16];
AA32 = A[17];
AA42 = A[18];
AA52 = A[19];
AA62 = A[20];
AA03 = A[21];
AA13 = A[22];
AA23 = A[23];
AA33 = A[24];
AA43 = A[25];
AA53 = A[26];
AA63 = A[27];
AA04 = A[28];
AA14 = A[29];
AA24 = A[30];
AA34 = A[31];
AA44 = A[32];
AA54 = A[33];
AA64 = A[34];
AA05 = A[35];
AA15 = A[36];
AA25 = A[37];
AA35 = A[38];
AA45 = A[39];
AA55 = A[40];
AA65 = A[41];
AA06 = A[42];
AA16 = A[43];
AA26 = A[44];
AA36 = A[45];
AA46 = A[46];
AA56 = A[47];
AA66 = A[48];
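/* the loads above read A in column-major order (AAij holds element (i,j));
   perm0..perm6 track which original row currently occupies each working row and
   are used at the end to place the inverse's columns correctly */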
perm0 = 0;
perm1 = 1;
perm2 = 2;
perm3 = 3;
perm4 = 4;
perm5 = 5;
perm6 = 6;
/****************** iteration 0 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA00);
pvt = 0;
t = absOp (AA10);
if (t > p) { p = t; pvt = 1; }
t = absOp (AA20);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA30);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA40);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA50);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA60);
if (t > p) { p = t; pvt = 6; }
/* swap pivot row with row 0 */
if (pvt == 1) {
tmp = AA00; AA00 = AA10; AA10 = tmp;
tmp = AA01; AA01 = AA11; AA11 = tmp;
tmp = AA02; AA02 = AA12; AA12 = tmp;
tmp = AA03; AA03 = AA13; AA13 = tmp;
tmp = AA04; AA04 = AA14; AA14 = tmp;
tmp = AA05; AA05 = AA15; AA15 = tmp;
tmp = AA06; AA06 = AA16; AA16 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm1; perm1 = i;
}
if (pvt == 2) {
tmp = AA00; AA00 = AA20; AA20 = tmp;
tmp = AA01; AA01 = AA21; AA21 = tmp;
tmp = AA02; AA02 = AA22; AA22 = tmp;
tmp = AA03; AA03 = AA23; AA23 = tmp;
tmp = AA04; AA04 = AA24; AA24 = tmp;
tmp = AA05; AA05 = AA25; AA25 = tmp;
tmp = AA06; AA06 = AA26; AA26 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA00; AA00 = AA30; AA30 = tmp;
tmp = AA01; AA01 = AA31; AA31 = tmp;
tmp = AA02; AA02 = AA32; AA32 = tmp;
tmp = AA03; AA03 = AA33; AA33 = tmp;
tmp = AA04; AA04 = AA34; AA34 = tmp;
tmp = AA05; AA05 = AA35; AA35 = tmp;
tmp = AA06; AA06 = AA36; AA36 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA00; AA00 = AA40; AA40 = tmp;
tmp = AA01; AA01 = AA41; AA41 = tmp;
tmp = AA02; AA02 = AA42; AA42 = tmp;
tmp = AA03; AA03 = AA43; AA43 = tmp;
tmp = AA04; AA04 = AA44; AA44 = tmp;
tmp = AA05; AA05 = AA45; AA45 = tmp;
tmp = AA06; AA06 = AA46; AA46 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA00; AA00 = AA50; AA50 = tmp;
tmp = AA01; AA01 = AA51; AA51 = tmp;
tmp = AA02; AA02 = AA52; AA52 = tmp;
tmp = AA03; AA03 = AA53; AA53 = tmp;
tmp = AA04; AA04 = AA54; AA54 = tmp;
tmp = AA05; AA05 = AA55; AA55 = tmp;
tmp = AA06; AA06 = AA56; AA56 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA00; AA00 = AA60; AA60 = tmp;
tmp = AA01; AA01 = AA61; AA61 = tmp;
tmp = AA02; AA02 = AA62; AA62 = tmp;
tmp = AA03; AA03 = AA63; AA63 = tmp;
tmp = AA04; AA04 = AA64; AA64 = tmp;
tmp = AA05; AA05 = AA65; AA65 = tmp;
tmp = AA06; AA06 = AA66; AA66 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm6; perm6 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA00);
icol0 = perm0;
AA00 = tmp;
AA01 = mulOp (tmp, AA01);
AA02 = mulOp (tmp, AA02);
AA03 = mulOp (tmp, AA03);
AA04 = mulOp (tmp, AA04);
AA05 = mulOp (tmp, AA05);
AA06 = mulOp (tmp, AA06);
/* eliminate above and below current row */
tmp = AA10;
AA10 = mulOp (negOp(tmp), AA00);
AA11 = fmnaOp (tmp, AA01, AA11);
AA12 = fmnaOp (tmp, AA02, AA12);
AA13 = fmnaOp (tmp, AA03, AA13);
AA14 = fmnaOp (tmp, AA04, AA14);
AA15 = fmnaOp (tmp, AA05, AA15);
AA16 = fmnaOp (tmp, AA06, AA16);
tmp = AA20;
AA20 = mulOp (negOp(tmp), AA00);
AA21 = fmnaOp (tmp, AA01, AA21);
AA22 = fmnaOp (tmp, AA02, AA22);
AA23 = fmnaOp (tmp, AA03, AA23);
AA24 = fmnaOp (tmp, AA04, AA24);
AA25 = fmnaOp (tmp, AA05, AA25);
AA26 = fmnaOp (tmp, AA06, AA26);
tmp = AA30;
AA30 = mulOp (negOp(tmp), AA00);
AA31 = fmnaOp (tmp, AA01, AA31);
AA32 = fmnaOp (tmp, AA02, AA32);
AA33 = fmnaOp (tmp, AA03, AA33);
AA34 = fmnaOp (tmp, AA04, AA34);
AA35 = fmnaOp (tmp, AA05, AA35);
AA36 = fmnaOp (tmp, AA06, AA36);
tmp = AA40;
AA40 = mulOp (negOp(tmp), AA00);
AA41 = fmnaOp (tmp, AA01, AA41);
AA42 = fmnaOp (tmp, AA02, AA42);
AA43 = fmnaOp (tmp, AA03, AA43);
AA44 = fmnaOp (tmp, AA04, AA44);
AA45 = fmnaOp (tmp, AA05, AA45);
AA46 = fmnaOp (tmp, AA06, AA46);
tmp = AA50;
AA50 = mulOp (negOp(tmp), AA00);
AA51 = fmnaOp (tmp, AA01, AA51);
AA52 = fmnaOp (tmp, AA02, AA52);
AA53 = fmnaOp (tmp, AA03, AA53);
AA54 = fmnaOp (tmp, AA04, AA54);
AA55 = fmnaOp (tmp, AA05, AA55);
AA56 = fmnaOp (tmp, AA06, AA56);
tmp = AA60;
AA60 = mulOp (negOp(tmp), AA00);
AA61 = fmnaOp (tmp, AA01, AA61);
AA62 = fmnaOp (tmp, AA02, AA62);
AA63 = fmnaOp (tmp, AA03, AA63);
AA64 = fmnaOp (tmp, AA04, AA64);
AA65 = fmnaOp (tmp, AA05, AA65);
AA66 = fmnaOp (tmp, AA06, AA66);
/****************** iteration 1 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA11);
pvt = 1;
t = absOp (AA21);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA31);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA41);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA51);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA61);
if (t > p) { p = t; pvt = 6; }
/* swap pivot row with row 1 */
if (pvt == 2) {
tmp = AA10; AA10 = AA20; AA20 = tmp;
tmp = AA11; AA11 = AA21; AA21 = tmp;
tmp = AA12; AA12 = AA22; AA22 = tmp;
tmp = AA13; AA13 = AA23; AA23 = tmp;
tmp = AA14; AA14 = AA24; AA24 = tmp;
tmp = AA15; AA15 = AA25; AA25 = tmp;
tmp = AA16; AA16 = AA26; AA26 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA10; AA10 = AA30; AA30 = tmp;
tmp = AA11; AA11 = AA31; AA31 = tmp;
tmp = AA12; AA12 = AA32; AA32 = tmp;
tmp = AA13; AA13 = AA33; AA33 = tmp;
tmp = AA14; AA14 = AA34; AA34 = tmp;
tmp = AA15; AA15 = AA35; AA35 = tmp;
tmp = AA16; AA16 = AA36; AA36 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA10; AA10 = AA40; AA40 = tmp;
tmp = AA11; AA11 = AA41; AA41 = tmp;
tmp = AA12; AA12 = AA42; AA42 = tmp;
tmp = AA13; AA13 = AA43; AA43 = tmp;
tmp = AA14; AA14 = AA44; AA44 = tmp;
tmp = AA15; AA15 = AA45; AA45 = tmp;
tmp = AA16; AA16 = AA46; AA46 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA10; AA10 = AA50; AA50 = tmp;
tmp = AA11; AA11 = AA51; AA51 = tmp;
tmp = AA12; AA12 = AA52; AA52 = tmp;
tmp = AA13; AA13 = AA53; AA53 = tmp;
tmp = AA14; AA14 = AA54; AA54 = tmp;
tmp = AA15; AA15 = AA55; AA55 = tmp;
tmp = AA16; AA16 = AA56; AA56 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA10; AA10 = AA60; AA60 = tmp;
tmp = AA11; AA11 = AA61; AA61 = tmp;
tmp = AA12; AA12 = AA62; AA62 = tmp;
tmp = AA13; AA13 = AA63; AA63 = tmp;
tmp = AA14; AA14 = AA64; AA64 = tmp;
tmp = AA15; AA15 = AA65; AA65 = tmp;
tmp = AA16; AA16 = AA66; AA66 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm6; perm6 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA11);
icol1 = perm1;
AA10 = mulOp (tmp, AA10);
AA11 = tmp;
AA12 = mulOp (tmp, AA12);
AA13 = mulOp (tmp, AA13);
AA14 = mulOp (tmp, AA14);
AA15 = mulOp (tmp, AA15);
AA16 = mulOp (tmp, AA16);
/* eliminate above and below current row */
tmp = AA01;
AA00 = fmnaOp (tmp, AA10, AA00);
AA01 = mulOp (negOp(tmp), AA11);
AA02 = fmnaOp (tmp, AA12, AA02);
AA03 = fmnaOp (tmp, AA13, AA03);
AA04 = fmnaOp (tmp, AA14, AA04);
AA05 = fmnaOp (tmp, AA15, AA05);
AA06 = fmnaOp (tmp, AA16, AA06);
tmp = AA21;
AA20 = fmnaOp (tmp, AA10, AA20);
AA21 = mulOp (negOp(tmp), AA11);
AA22 = fmnaOp (tmp, AA12, AA22);
AA23 = fmnaOp (tmp, AA13, AA23);
AA24 = fmnaOp (tmp, AA14, AA24);
AA25 = fmnaOp (tmp, AA15, AA25);
AA26 = fmnaOp (tmp, AA16, AA26);
tmp = AA31;
AA30 = fmnaOp (tmp, AA10, AA30);
AA31 = mulOp (negOp(tmp), AA11);
AA32 = fmnaOp (tmp, AA12, AA32);
AA33 = fmnaOp (tmp, AA13, AA33);
AA34 = fmnaOp (tmp, AA14, AA34);
AA35 = fmnaOp (tmp, AA15, AA35);
AA36 = fmnaOp (tmp, AA16, AA36);
tmp = AA41;
AA40 = fmnaOp (tmp, AA10, AA40);
AA41 = mulOp (negOp(tmp), AA11);
AA42 = fmnaOp (tmp, AA12, AA42);
AA43 = fmnaOp (tmp, AA13, AA43);
AA44 = fmnaOp (tmp, AA14, AA44);
AA45 = fmnaOp (tmp, AA15, AA45);
AA46 = fmnaOp (tmp, AA16, AA46);
tmp = AA51;
AA50 = fmnaOp (tmp, AA10, AA50);
AA51 = mulOp (negOp(tmp), AA11);
AA52 = fmnaOp (tmp, AA12, AA52);
AA53 = fmnaOp (tmp, AA13, AA53);
AA54 = fmnaOp (tmp, AA14, AA54);
AA55 = fmnaOp (tmp, AA15, AA55);
AA56 = fmnaOp (tmp, AA16, AA56);
tmp = AA61;
AA60 = fmnaOp (tmp, AA10, AA60);
AA61 = mulOp (negOp(tmp), AA11);
AA62 = fmnaOp (tmp, AA12, AA62);
AA63 = fmnaOp (tmp, AA13, AA63);
AA64 = fmnaOp (tmp, AA14, AA64);
AA65 = fmnaOp (tmp, AA15, AA65);
AA66 = fmnaOp (tmp, AA16, AA66);
/****************** iteration 2 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA22);
pvt = 2;
t = absOp (AA32);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA42);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA52);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA62);
if (t > p) { p = t; pvt = 6; }
/* swap pivot row with row 2 */
if (pvt == 3) {
tmp = AA20; AA20 = AA30; AA30 = tmp;
tmp = AA21; AA21 = AA31; AA31 = tmp;
tmp = AA22; AA22 = AA32; AA32 = tmp;
tmp = AA23; AA23 = AA33; AA33 = tmp;
tmp = AA24; AA24 = AA34; AA34 = tmp;
tmp = AA25; AA25 = AA35; AA35 = tmp;
tmp = AA26; AA26 = AA36; AA36 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA20; AA20 = AA40; AA40 = tmp;
tmp = AA21; AA21 = AA41; AA41 = tmp;
tmp = AA22; AA22 = AA42; AA42 = tmp;
tmp = AA23; AA23 = AA43; AA43 = tmp;
tmp = AA24; AA24 = AA44; AA44 = tmp;
tmp = AA25; AA25 = AA45; AA45 = tmp;
tmp = AA26; AA26 = AA46; AA46 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA20; AA20 = AA50; AA50 = tmp;
tmp = AA21; AA21 = AA51; AA51 = tmp;
tmp = AA22; AA22 = AA52; AA52 = tmp;
tmp = AA23; AA23 = AA53; AA53 = tmp;
tmp = AA24; AA24 = AA54; AA54 = tmp;
tmp = AA25; AA25 = AA55; AA55 = tmp;
tmp = AA26; AA26 = AA56; AA56 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA20; AA20 = AA60; AA60 = tmp;
tmp = AA21; AA21 = AA61; AA61 = tmp;
tmp = AA22; AA22 = AA62; AA62 = tmp;
tmp = AA23; AA23 = AA63; AA63 = tmp;
tmp = AA24; AA24 = AA64; AA64 = tmp;
tmp = AA25; AA25 = AA65; AA65 = tmp;
tmp = AA26; AA26 = AA66; AA66 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm6; perm6 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA22);
icol2 = perm2;
AA20 = mulOp (tmp, AA20);
AA21 = mulOp (tmp, AA21);
AA22 = tmp;
AA23 = mulOp (tmp, AA23);
AA24 = mulOp (tmp, AA24);
AA25 = mulOp (tmp, AA25);
AA26 = mulOp (tmp, AA26);
/* eliminate above and below current row */
tmp = AA02;
AA00 = fmnaOp (tmp, AA20, AA00);
AA01 = fmnaOp (tmp, AA21, AA01);
AA02 = mulOp (negOp(tmp), AA22);
AA03 = fmnaOp (tmp, AA23, AA03);
AA04 = fmnaOp (tmp, AA24, AA04);
AA05 = fmnaOp (tmp, AA25, AA05);
AA06 = fmnaOp (tmp, AA26, AA06);
tmp = AA12;
AA10 = fmnaOp (tmp, AA20, AA10);
AA11 = fmnaOp (tmp, AA21, AA11);
AA12 = mulOp (negOp(tmp), AA22);
AA13 = fmnaOp (tmp, AA23, AA13);
AA14 = fmnaOp (tmp, AA24, AA14);
AA15 = fmnaOp (tmp, AA25, AA15);
AA16 = fmnaOp (tmp, AA26, AA16);
tmp = AA32;
AA30 = fmnaOp (tmp, AA20, AA30);
AA31 = fmnaOp (tmp, AA21, AA31);
AA32 = mulOp (negOp(tmp), AA22);
AA33 = fmnaOp (tmp, AA23, AA33);
AA34 = fmnaOp (tmp, AA24, AA34);
AA35 = fmnaOp (tmp, AA25, AA35);
AA36 = fmnaOp (tmp, AA26, AA36);
tmp = AA42;
AA40 = fmnaOp (tmp, AA20, AA40);
AA41 = fmnaOp (tmp, AA21, AA41);
AA42 = mulOp (negOp(tmp), AA22);
AA43 = fmnaOp (tmp, AA23, AA43);
AA44 = fmnaOp (tmp, AA24, AA44);
AA45 = fmnaOp (tmp, AA25, AA45);
AA46 = fmnaOp (tmp, AA26, AA46);
tmp = AA52;
AA50 = fmnaOp (tmp, AA20, AA50);
AA51 = fmnaOp (tmp, AA21, AA51);
AA52 = mulOp (negOp(tmp), AA22);
AA53 = fmnaOp (tmp, AA23, AA53);
AA54 = fmnaOp (tmp, AA24, AA54);
AA55 = fmnaOp (tmp, AA25, AA55);
AA56 = fmnaOp (tmp, AA26, AA56);
tmp = AA62;
AA60 = fmnaOp (tmp, AA20, AA60);
AA61 = fmnaOp (tmp, AA21, AA61);
AA62 = mulOp (negOp(tmp), AA22);
AA63 = fmnaOp (tmp, AA23, AA63);
AA64 = fmnaOp (tmp, AA24, AA64);
AA65 = fmnaOp (tmp, AA25, AA65);
AA66 = fmnaOp (tmp, AA26, AA66);
/****************** iteration 3 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA33);
pvt = 3;
t = absOp (AA43);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA53);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA63);
if (t > p) { p = t; pvt = 6; }
/* swap pivot row with row 3 */
if (pvt == 4) {
tmp = AA30; AA30 = AA40; AA40 = tmp;
tmp = AA31; AA31 = AA41; AA41 = tmp;
tmp = AA32; AA32 = AA42; AA42 = tmp;
tmp = AA33; AA33 = AA43; AA43 = tmp;
tmp = AA34; AA34 = AA44; AA44 = tmp;
tmp = AA35; AA35 = AA45; AA45 = tmp;
tmp = AA36; AA36 = AA46; AA46 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA30; AA30 = AA50; AA50 = tmp;
tmp = AA31; AA31 = AA51; AA51 = tmp;
tmp = AA32; AA32 = AA52; AA52 = tmp;
tmp = AA33; AA33 = AA53; AA53 = tmp;
tmp = AA34; AA34 = AA54; AA54 = tmp;
tmp = AA35; AA35 = AA55; AA55 = tmp;
tmp = AA36; AA36 = AA56; AA56 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA30; AA30 = AA60; AA60 = tmp;
tmp = AA31; AA31 = AA61; AA61 = tmp;
tmp = AA32; AA32 = AA62; AA62 = tmp;
tmp = AA33; AA33 = AA63; AA63 = tmp;
tmp = AA34; AA34 = AA64; AA64 = tmp;
tmp = AA35; AA35 = AA65; AA65 = tmp;
tmp = AA36; AA36 = AA66; AA66 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm6; perm6 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA33);
icol3 = perm3;
AA30 = mulOp (tmp, AA30);
AA31 = mulOp (tmp, AA31);
AA32 = mulOp (tmp, AA32);
AA33 = tmp;
AA34 = mulOp (tmp, AA34);
AA35 = mulOp (tmp, AA35);
AA36 = mulOp (tmp, AA36);
/* eliminate above and below current row */
tmp = AA03;
AA00 = fmnaOp (tmp, AA30, AA00);
AA01 = fmnaOp (tmp, AA31, AA01);
AA02 = fmnaOp (tmp, AA32, AA02);
AA03 = mulOp (negOp(tmp), AA33);
AA04 = fmnaOp (tmp, AA34, AA04);
AA05 = fmnaOp (tmp, AA35, AA05);
AA06 = fmnaOp (tmp, AA36, AA06);
tmp = AA13;
AA10 = fmnaOp (tmp, AA30, AA10);
AA11 = fmnaOp (tmp, AA31, AA11);
AA12 = fmnaOp (tmp, AA32, AA12);
AA13 = mulOp (negOp(tmp), AA33);
AA14 = fmnaOp (tmp, AA34, AA14);
AA15 = fmnaOp (tmp, AA35, AA15);
AA16 = fmnaOp (tmp, AA36, AA16);
tmp = AA23;
AA20 = fmnaOp (tmp, AA30, AA20);
AA21 = fmnaOp (tmp, AA31, AA21);
AA22 = fmnaOp (tmp, AA32, AA22);
AA23 = mulOp (negOp(tmp), AA33);
AA24 = fmnaOp (tmp, AA34, AA24);
AA25 = fmnaOp (tmp, AA35, AA25);
AA26 = fmnaOp (tmp, AA36, AA26);
tmp = AA43;
AA40 = fmnaOp (tmp, AA30, AA40);
AA41 = fmnaOp (tmp, AA31, AA41);
AA42 = fmnaOp (tmp, AA32, AA42);
AA43 = mulOp (negOp(tmp), AA33);
AA44 = fmnaOp (tmp, AA34, AA44);
AA45 = fmnaOp (tmp, AA35, AA45);
AA46 = fmnaOp (tmp, AA36, AA46);
tmp = AA53;
AA50 = fmnaOp (tmp, AA30, AA50);
AA51 = fmnaOp (tmp, AA31, AA51);
AA52 = fmnaOp (tmp, AA32, AA52);
AA53 = mulOp (negOp(tmp), AA33);
AA54 = fmnaOp (tmp, AA34, AA54);
AA55 = fmnaOp (tmp, AA35, AA55);
AA56 = fmnaOp (tmp, AA36, AA56);
tmp = AA63;
AA60 = fmnaOp (tmp, AA30, AA60);
AA61 = fmnaOp (tmp, AA31, AA61);
AA62 = fmnaOp (tmp, AA32, AA62);
AA63 = mulOp (negOp(tmp), AA33);
AA64 = fmnaOp (tmp, AA34, AA64);
AA65 = fmnaOp (tmp, AA35, AA65);
AA66 = fmnaOp (tmp, AA36, AA66);
/****************** iteration 4 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA44);
pvt = 4;
t = absOp (AA54);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA64);
if (t > p) { p = t; pvt = 6; }
/* swap pivot row with row 4 */
if (pvt == 5) {
tmp = AA40; AA40 = AA50; AA50 = tmp;
tmp = AA41; AA41 = AA51; AA51 = tmp;
tmp = AA42; AA42 = AA52; AA52 = tmp;
tmp = AA43; AA43 = AA53; AA53 = tmp;
tmp = AA44; AA44 = AA54; AA54 = tmp;
tmp = AA45; AA45 = AA55; AA55 = tmp;
tmp = AA46; AA46 = AA56; AA56 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA40; AA40 = AA60; AA60 = tmp;
tmp = AA41; AA41 = AA61; AA61 = tmp;
tmp = AA42; AA42 = AA62; AA62 = tmp;
tmp = AA43; AA43 = AA63; AA63 = tmp;
tmp = AA44; AA44 = AA64; AA64 = tmp;
tmp = AA45; AA45 = AA65; AA65 = tmp;
tmp = AA46; AA46 = AA66; AA66 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm6; perm6 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA44);
icol4 = perm4;
AA40 = mulOp (tmp, AA40);
AA41 = mulOp (tmp, AA41);
AA42 = mulOp (tmp, AA42);
AA43 = mulOp (tmp, AA43);
AA44 = tmp;
AA45 = mulOp (tmp, AA45);
AA46 = mulOp (tmp, AA46);
/* eliminate above and below current row */
tmp = AA04;
AA00 = fmnaOp (tmp, AA40, AA00);
AA01 = fmnaOp (tmp, AA41, AA01);
AA02 = fmnaOp (tmp, AA42, AA02);
AA03 = fmnaOp (tmp, AA43, AA03);
AA04 = mulOp (negOp(tmp), AA44);
AA05 = fmnaOp (tmp, AA45, AA05);
AA06 = fmnaOp (tmp, AA46, AA06);
tmp = AA14;
AA10 = fmnaOp (tmp, AA40, AA10);
AA11 = fmnaOp (tmp, AA41, AA11);
AA12 = fmnaOp (tmp, AA42, AA12);
AA13 = fmnaOp (tmp, AA43, AA13);
AA14 = mulOp (negOp(tmp), AA44);
AA15 = fmnaOp (tmp, AA45, AA15);
AA16 = fmnaOp (tmp, AA46, AA16);
tmp = AA24;
AA20 = fmnaOp (tmp, AA40, AA20);
AA21 = fmnaOp (tmp, AA41, AA21);
AA22 = fmnaOp (tmp, AA42, AA22);
AA23 = fmnaOp (tmp, AA43, AA23);
AA24 = mulOp (negOp(tmp), AA44);
AA25 = fmnaOp (tmp, AA45, AA25);
AA26 = fmnaOp (tmp, AA46, AA26);
tmp = AA34;
AA30 = fmnaOp (tmp, AA40, AA30);
AA31 = fmnaOp (tmp, AA41, AA31);
AA32 = fmnaOp (tmp, AA42, AA32);
AA33 = fmnaOp (tmp, AA43, AA33);
AA34 = mulOp (negOp(tmp), AA44);
AA35 = fmnaOp (tmp, AA45, AA35);
AA36 = fmnaOp (tmp, AA46, AA36);
tmp = AA54;
AA50 = fmnaOp (tmp, AA40, AA50);
AA51 = fmnaOp (tmp, AA41, AA51);
AA52 = fmnaOp (tmp, AA42, AA52);
AA53 = fmnaOp (tmp, AA43, AA53);
AA54 = mulOp (negOp(tmp), AA44);
AA55 = fmnaOp (tmp, AA45, AA55);
AA56 = fmnaOp (tmp, AA46, AA56);
tmp = AA64;
AA60 = fmnaOp (tmp, AA40, AA60);
AA61 = fmnaOp (tmp, AA41, AA61);
AA62 = fmnaOp (tmp, AA42, AA62);
AA63 = fmnaOp (tmp, AA43, AA63);
AA64 = mulOp (negOp(tmp), AA44);
AA65 = fmnaOp (tmp, AA45, AA65);
AA66 = fmnaOp (tmp, AA46, AA66);
/****************** iteration 5 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA55);
pvt = 5;
t = absOp (AA65);
if (t > p) { p = t; pvt = 6; }
/* swap pivot row with row 5 */
if (pvt == 6) {
tmp = AA50; AA50 = AA60; AA60 = tmp;
tmp = AA51; AA51 = AA61; AA61 = tmp;
tmp = AA52; AA52 = AA62; AA62 = tmp;
tmp = AA53; AA53 = AA63; AA63 = tmp;
tmp = AA54; AA54 = AA64; AA64 = tmp;
tmp = AA55; AA55 = AA65; AA65 = tmp;
tmp = AA56; AA56 = AA66; AA66 = tmp;
/* update permutation vector based on row swap */
i = perm5; perm5 = perm6; perm6 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA55);
icol5 = perm5;
AA50 = mulOp (tmp, AA50);
AA51 = mulOp (tmp, AA51);
AA52 = mulOp (tmp, AA52);
AA53 = mulOp (tmp, AA53);
AA54 = mulOp (tmp, AA54);
AA55 = tmp;
AA56 = mulOp (tmp, AA56);
/* eliminate above and below current row */
tmp = AA05;
AA00 = fmnaOp (tmp, AA50, AA00);
AA01 = fmnaOp (tmp, AA51, AA01);
AA02 = fmnaOp (tmp, AA52, AA02);
AA03 = fmnaOp (tmp, AA53, AA03);
AA04 = fmnaOp (tmp, AA54, AA04);
AA05 = mulOp (negOp(tmp), AA55);
AA06 = fmnaOp (tmp, AA56, AA06);
tmp = AA15;
AA10 = fmnaOp (tmp, AA50, AA10);
AA11 = fmnaOp (tmp, AA51, AA11);
AA12 = fmnaOp (tmp, AA52, AA12);
AA13 = fmnaOp (tmp, AA53, AA13);
AA14 = fmnaOp (tmp, AA54, AA14);
AA15 = mulOp (negOp(tmp), AA55);
AA16 = fmnaOp (tmp, AA56, AA16);
tmp = AA25;
AA20 = fmnaOp (tmp, AA50, AA20);
AA21 = fmnaOp (tmp, AA51, AA21);
AA22 = fmnaOp (tmp, AA52, AA22);
AA23 = fmnaOp (tmp, AA53, AA23);
AA24 = fmnaOp (tmp, AA54, AA24);
AA25 = mulOp (negOp(tmp), AA55);
AA26 = fmnaOp (tmp, AA56, AA26);
tmp = AA35;
AA30 = fmnaOp (tmp, AA50, AA30);
AA31 = fmnaOp (tmp, AA51, AA31);
AA32 = fmnaOp (tmp, AA52, AA32);
AA33 = fmnaOp (tmp, AA53, AA33);
AA34 = fmnaOp (tmp, AA54, AA34);
AA35 = mulOp (negOp(tmp), AA55);
AA36 = fmnaOp (tmp, AA56, AA36);
tmp = AA45;
AA40 = fmnaOp (tmp, AA50, AA40);
AA41 = fmnaOp (tmp, AA51, AA41);
AA42 = fmnaOp (tmp, AA52, AA42);
AA43 = fmnaOp (tmp, AA53, AA43);
AA44 = fmnaOp (tmp, AA54, AA44);
AA45 = mulOp (negOp(tmp), AA55);
AA46 = fmnaOp (tmp, AA56, AA46);
tmp = AA65;
AA60 = fmnaOp (tmp, AA50, AA60);
AA61 = fmnaOp (tmp, AA51, AA61);
AA62 = fmnaOp (tmp, AA52, AA62);
AA63 = fmnaOp (tmp, AA53, AA63);
AA64 = fmnaOp (tmp, AA54, AA64);
AA65 = mulOp (negOp(tmp), AA55);
AA66 = fmnaOp (tmp, AA56, AA66);
/****************** iteration 6 ****************/
/* scale current row */
tmp = rcpOp (AA66);
icol6 = perm6;
AA60 = mulOp (tmp, AA60);
AA61 = mulOp (tmp, AA61);
AA62 = mulOp (tmp, AA62);
AA63 = mulOp (tmp, AA63);
AA64 = mulOp (tmp, AA64);
AA65 = mulOp (tmp, AA65);
AA66 = tmp;
/* eliminate above and below current row */
tmp = AA06;
AA00 = fmnaOp (tmp, AA60, AA00);
AA01 = fmnaOp (tmp, AA61, AA01);
AA02 = fmnaOp (tmp, AA62, AA02);
AA03 = fmnaOp (tmp, AA63, AA03);
AA04 = fmnaOp (tmp, AA64, AA04);
AA05 = fmnaOp (tmp, AA65, AA05);
AA06 = mulOp (negOp(tmp), AA66);
tmp = AA16;
AA10 = fmnaOp (tmp, AA60, AA10);
AA11 = fmnaOp (tmp, AA61, AA11);
AA12 = fmnaOp (tmp, AA62, AA12);
AA13 = fmnaOp (tmp, AA63, AA13);
AA14 = fmnaOp (tmp, AA64, AA14);
AA15 = fmnaOp (tmp, AA65, AA15);
AA16 = mulOp (negOp(tmp), AA66);
tmp = AA26;
AA20 = fmnaOp (tmp, AA60, AA20);
AA21 = fmnaOp (tmp, AA61, AA21);
AA22 = fmnaOp (tmp, AA62, AA22);
AA23 = fmnaOp (tmp, AA63, AA23);
AA24 = fmnaOp (tmp, AA64, AA24);
AA25 = fmnaOp (tmp, AA65, AA25);
AA26 = mulOp (negOp(tmp), AA66);
tmp = AA36;
AA30 = fmnaOp (tmp, AA60, AA30);
AA31 = fmnaOp (tmp, AA61, AA31);
AA32 = fmnaOp (tmp, AA62, AA32);
AA33 = fmnaOp (tmp, AA63, AA33);
AA34 = fmnaOp (tmp, AA64, AA34);
AA35 = fmnaOp (tmp, AA65, AA35);
AA36 = mulOp (negOp(tmp), AA66);
tmp = AA46;
AA40 = fmnaOp (tmp, AA60, AA40);
AA41 = fmnaOp (tmp, AA61, AA41);
AA42 = fmnaOp (tmp, AA62, AA42);
AA43 = fmnaOp (tmp, AA63, AA43);
AA44 = fmnaOp (tmp, AA64, AA44);
AA45 = fmnaOp (tmp, AA65, AA45);
AA46 = mulOp (negOp(tmp), AA66);
tmp = AA56;
AA50 = fmnaOp (tmp, AA60, AA50);
AA51 = fmnaOp (tmp, AA61, AA51);
AA52 = fmnaOp (tmp, AA62, AA52);
AA53 = fmnaOp (tmp, AA63, AA53);
AA54 = fmnaOp (tmp, AA64, AA54);
AA55 = fmnaOp (tmp, AA65, AA55);
AA56 = mulOp (negOp(tmp), AA66);
/* sort columns into the correct order */
Ainv(0,icol0) = AA00;
Ainv(1,icol0) = AA10;
Ainv(2,icol0) = AA20;
Ainv(3,icol0) = AA30;
Ainv(4,icol0) = AA40;
Ainv(5,icol0) = AA50;
Ainv(6,icol0) = AA60;
Ainv(0,icol1) = AA01;
Ainv(1,icol1) = AA11;
Ainv(2,icol1) = AA21;
Ainv(3,icol1) = AA31;
Ainv(4,icol1) = AA41;
Ainv(5,icol1) = AA51;
Ainv(6,icol1) = AA61;
Ainv(0,icol2) = AA02;
Ainv(1,icol2) = AA12;
Ainv(2,icol2) = AA22;
Ainv(3,icol2) = AA32;
Ainv(4,icol2) = AA42;
Ainv(5,icol2) = AA52;
Ainv(6,icol2) = AA62;
Ainv(0,icol3) = AA03;
Ainv(1,icol3) = AA13;
Ainv(2,icol3) = AA23;
Ainv(3,icol3) = AA33;
Ainv(4,icol3) = AA43;
Ainv(5,icol3) = AA53;
Ainv(6,icol3) = AA63;
Ainv(0,icol4) = AA04;
Ainv(1,icol4) = AA14;
Ainv(2,icol4) = AA24;
Ainv(3,icol4) = AA34;
Ainv(4,icol4) = AA44;
Ainv(5,icol4) = AA54;
Ainv(6,icol4) = AA64;
Ainv(0,icol5) = AA05;
Ainv(1,icol5) = AA15;
Ainv(2,icol5) = AA25;
Ainv(3,icol5) = AA35;
Ainv(4,icol5) = AA45;
Ainv(5,icol5) = AA55;
Ainv(6,icol5) = AA65;
Ainv(0,icol6) = AA06;
Ainv(1,icol6) = AA16;
Ainv(2,icol6) = AA26;
Ainv(3,icol6) = AA36;
Ainv(4,icol6) = AA46;
Ainv(5,icol6) = AA56;
Ainv(6,icol6) = AA66;
}
}
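/*
 * matinv_8x8_matrix_per_thread: same fully unrolled, one-matrix-per-thread
 * Gauss-Jordan inversion as the 7x7 kernel above, extended to 8x8 matrices
 * (registers AA00..AA77, permutation entries perm0..perm7). A and Ainv are
 * expected to hold `batch` contiguous column-major 8x8 matrices.
 */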
template<typename T, int arch>
__global__ void matinv_8x8_matrix_per_thread (const T *A, T *Ainv, int batch)
{
const int blkNum = blockIdx.y * gridDim.x + blockIdx.x;
const int thrdNum = blkNum * blockDim.x + threadIdx.x;
const int N = 8;
int perm0, perm1, perm2, perm3, perm4, perm5, perm6, perm7;
int icol0, icol1, icol2, icol3, icol4, icol5, icol6, icol7;
T AA00, AA01, AA02, AA03, AA04, AA05, AA06, AA07;
T AA10, AA11, AA12, AA13, AA14, AA15, AA16, AA17;
T AA20, AA21, AA22, AA23, AA24, AA25, AA26, AA27;
T AA30, AA31, AA32, AA33, AA34, AA35, AA36, AA37;
T AA40, AA41, AA42, AA43, AA44, AA45, AA46, AA47;
T AA50, AA51, AA52, AA53, AA54, AA55, AA56, AA57;
T AA60, AA61, AA62, AA63, AA64, AA65, AA66, AA67;
T AA70, AA71, AA72, AA73, AA74, AA75, AA76, AA77;
T tmp;
#if USE_PIVOTING
typename config<T,arch>::absValType t;
typename config<T,arch>::absValType p;
int i, pvt;
#endif
A += thrdNum * N * N;
Ainv += thrdNum * N * N;
if (thrdNum < batch) {
AA00 = A[0];
AA10 = A[1];
AA20 = A[2];
AA30 = A[3];
AA40 = A[4];
AA50 = A[5];
AA60 = A[6];
AA70 = A[7];
AA01 = A[8];
AA11 = A[9];
AA21 = A[10];
AA31 = A[11];
AA41 = A[12];
AA51 = A[13];
AA61 = A[14];
AA71 = A[15];
AA02 = A[16];
AA12 = A[17];
AA22 = A[18];
AA32 = A[19];
AA42 = A[20];
AA52 = A[21];
AA62 = A[22];
AA72 = A[23];
AA03 = A[24];
AA13 = A[25];
AA23 = A[26];
AA33 = A[27];
AA43 = A[28];
AA53 = A[29];
AA63 = A[30];
AA73 = A[31];
AA04 = A[32];
AA14 = A[33];
AA24 = A[34];
AA34 = A[35];
AA44 = A[36];
AA54 = A[37];
AA64 = A[38];
AA74 = A[39];
AA05 = A[40];
AA15 = A[41];
AA25 = A[42];
AA35 = A[43];
AA45 = A[44];
AA55 = A[45];
AA65 = A[46];
AA75 = A[47];
AA06 = A[48];
AA16 = A[49];
AA26 = A[50];
AA36 = A[51];
AA46 = A[52];
AA56 = A[53];
AA66 = A[54];
AA76 = A[55];
AA07 = A[56];
AA17 = A[57];
AA27 = A[58];
AA37 = A[59];
AA47 = A[60];
AA57 = A[61];
AA67 = A[62];
AA77 = A[63];
perm0 = 0;
perm1 = 1;
perm2 = 2;
perm3 = 3;
perm4 = 4;
perm5 = 5;
perm6 = 6;
perm7 = 7;
/****************** iteration 0 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA00);
pvt = 0;
t = absOp (AA10);
if (t > p) { p = t; pvt = 1; }
t = absOp (AA20);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA30);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA40);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA50);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA60);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA70);
if (t > p) { p = t; pvt = 7; }
/* swap pivot row with row 0 */
if (pvt == 1) {
tmp = AA00; AA00 = AA10; AA10 = tmp;
tmp = AA01; AA01 = AA11; AA11 = tmp;
tmp = AA02; AA02 = AA12; AA12 = tmp;
tmp = AA03; AA03 = AA13; AA13 = tmp;
tmp = AA04; AA04 = AA14; AA14 = tmp;
tmp = AA05; AA05 = AA15; AA15 = tmp;
tmp = AA06; AA06 = AA16; AA16 = tmp;
tmp = AA07; AA07 = AA17; AA17 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm1; perm1 = i;
}
if (pvt == 2) {
tmp = AA00; AA00 = AA20; AA20 = tmp;
tmp = AA01; AA01 = AA21; AA21 = tmp;
tmp = AA02; AA02 = AA22; AA22 = tmp;
tmp = AA03; AA03 = AA23; AA23 = tmp;
tmp = AA04; AA04 = AA24; AA24 = tmp;
tmp = AA05; AA05 = AA25; AA25 = tmp;
tmp = AA06; AA06 = AA26; AA26 = tmp;
tmp = AA07; AA07 = AA27; AA27 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA00; AA00 = AA30; AA30 = tmp;
tmp = AA01; AA01 = AA31; AA31 = tmp;
tmp = AA02; AA02 = AA32; AA32 = tmp;
tmp = AA03; AA03 = AA33; AA33 = tmp;
tmp = AA04; AA04 = AA34; AA34 = tmp;
tmp = AA05; AA05 = AA35; AA35 = tmp;
tmp = AA06; AA06 = AA36; AA36 = tmp;
tmp = AA07; AA07 = AA37; AA37 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA00; AA00 = AA40; AA40 = tmp;
tmp = AA01; AA01 = AA41; AA41 = tmp;
tmp = AA02; AA02 = AA42; AA42 = tmp;
tmp = AA03; AA03 = AA43; AA43 = tmp;
tmp = AA04; AA04 = AA44; AA44 = tmp;
tmp = AA05; AA05 = AA45; AA45 = tmp;
tmp = AA06; AA06 = AA46; AA46 = tmp;
tmp = AA07; AA07 = AA47; AA47 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA00; AA00 = AA50; AA50 = tmp;
tmp = AA01; AA01 = AA51; AA51 = tmp;
tmp = AA02; AA02 = AA52; AA52 = tmp;
tmp = AA03; AA03 = AA53; AA53 = tmp;
tmp = AA04; AA04 = AA54; AA54 = tmp;
tmp = AA05; AA05 = AA55; AA55 = tmp;
tmp = AA06; AA06 = AA56; AA56 = tmp;
tmp = AA07; AA07 = AA57; AA57 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA00; AA00 = AA60; AA60 = tmp;
tmp = AA01; AA01 = AA61; AA61 = tmp;
tmp = AA02; AA02 = AA62; AA62 = tmp;
tmp = AA03; AA03 = AA63; AA63 = tmp;
tmp = AA04; AA04 = AA64; AA64 = tmp;
tmp = AA05; AA05 = AA65; AA65 = tmp;
tmp = AA06; AA06 = AA66; AA66 = tmp;
tmp = AA07; AA07 = AA67; AA67 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA00; AA00 = AA70; AA70 = tmp;
tmp = AA01; AA01 = AA71; AA71 = tmp;
tmp = AA02; AA02 = AA72; AA72 = tmp;
tmp = AA03; AA03 = AA73; AA73 = tmp;
tmp = AA04; AA04 = AA74; AA74 = tmp;
tmp = AA05; AA05 = AA75; AA75 = tmp;
tmp = AA06; AA06 = AA76; AA76 = tmp;
tmp = AA07; AA07 = AA77; AA77 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm7; perm7 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA00);
icol0 = perm0;
AA00 = tmp;
AA01 = mulOp (tmp, AA01);
AA02 = mulOp (tmp, AA02);
AA03 = mulOp (tmp, AA03);
AA04 = mulOp (tmp, AA04);
AA05 = mulOp (tmp, AA05);
AA06 = mulOp (tmp, AA06);
AA07 = mulOp (tmp, AA07);
/* eliminate above and below current row */
tmp = AA10;
AA10 = mulOp (negOp(tmp), AA00);
AA11 = fmnaOp (tmp, AA01, AA11);
AA12 = fmnaOp (tmp, AA02, AA12);
AA13 = fmnaOp (tmp, AA03, AA13);
AA14 = fmnaOp (tmp, AA04, AA14);
AA15 = fmnaOp (tmp, AA05, AA15);
AA16 = fmnaOp (tmp, AA06, AA16);
AA17 = fmnaOp (tmp, AA07, AA17);
tmp = AA20;
AA20 = mulOp (negOp(tmp), AA00);
AA21 = fmnaOp (tmp, AA01, AA21);
AA22 = fmnaOp (tmp, AA02, AA22);
AA23 = fmnaOp (tmp, AA03, AA23);
AA24 = fmnaOp (tmp, AA04, AA24);
AA25 = fmnaOp (tmp, AA05, AA25);
AA26 = fmnaOp (tmp, AA06, AA26);
AA27 = fmnaOp (tmp, AA07, AA27);
tmp = AA30;
AA30 = mulOp (negOp(tmp), AA00);
AA31 = fmnaOp (tmp, AA01, AA31);
AA32 = fmnaOp (tmp, AA02, AA32);
AA33 = fmnaOp (tmp, AA03, AA33);
AA34 = fmnaOp (tmp, AA04, AA34);
AA35 = fmnaOp (tmp, AA05, AA35);
AA36 = fmnaOp (tmp, AA06, AA36);
AA37 = fmnaOp (tmp, AA07, AA37);
tmp = AA40;
AA40 = mulOp (negOp(tmp), AA00);
AA41 = fmnaOp (tmp, AA01, AA41);
AA42 = fmnaOp (tmp, AA02, AA42);
AA43 = fmnaOp (tmp, AA03, AA43);
AA44 = fmnaOp (tmp, AA04, AA44);
AA45 = fmnaOp (tmp, AA05, AA45);
AA46 = fmnaOp (tmp, AA06, AA46);
AA47 = fmnaOp (tmp, AA07, AA47);
tmp = AA50;
AA50 = mulOp (negOp(tmp), AA00);
AA51 = fmnaOp (tmp, AA01, AA51);
AA52 = fmnaOp (tmp, AA02, AA52);
AA53 = fmnaOp (tmp, AA03, AA53);
AA54 = fmnaOp (tmp, AA04, AA54);
AA55 = fmnaOp (tmp, AA05, AA55);
AA56 = fmnaOp (tmp, AA06, AA56);
AA57 = fmnaOp (tmp, AA07, AA57);
tmp = AA60;
AA60 = mulOp (negOp(tmp), AA00);
AA61 = fmnaOp (tmp, AA01, AA61);
AA62 = fmnaOp (tmp, AA02, AA62);
AA63 = fmnaOp (tmp, AA03, AA63);
AA64 = fmnaOp (tmp, AA04, AA64);
AA65 = fmnaOp (tmp, AA05, AA65);
AA66 = fmnaOp (tmp, AA06, AA66);
AA67 = fmnaOp (tmp, AA07, AA67);
tmp = AA70;
AA70 = mulOp (negOp(tmp), AA00);
AA71 = fmnaOp (tmp, AA01, AA71);
AA72 = fmnaOp (tmp, AA02, AA72);
AA73 = fmnaOp (tmp, AA03, AA73);
AA74 = fmnaOp (tmp, AA04, AA74);
AA75 = fmnaOp (tmp, AA05, AA75);
AA76 = fmnaOp (tmp, AA06, AA76);
AA77 = fmnaOp (tmp, AA07, AA77);
/****************** iteration 1 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA11);
pvt = 1;
t = absOp (AA21);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA31);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA41);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA51);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA61);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA71);
if (t > p) { p = t; pvt = 7; }
/* swap pivot row with row 1 */
if (pvt == 2) {
tmp = AA10; AA10 = AA20; AA20 = tmp;
tmp = AA11; AA11 = AA21; AA21 = tmp;
tmp = AA12; AA12 = AA22; AA22 = tmp;
tmp = AA13; AA13 = AA23; AA23 = tmp;
tmp = AA14; AA14 = AA24; AA24 = tmp;
tmp = AA15; AA15 = AA25; AA25 = tmp;
tmp = AA16; AA16 = AA26; AA26 = tmp;
tmp = AA17; AA17 = AA27; AA27 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA10; AA10 = AA30; AA30 = tmp;
tmp = AA11; AA11 = AA31; AA31 = tmp;
tmp = AA12; AA12 = AA32; AA32 = tmp;
tmp = AA13; AA13 = AA33; AA33 = tmp;
tmp = AA14; AA14 = AA34; AA34 = tmp;
tmp = AA15; AA15 = AA35; AA35 = tmp;
tmp = AA16; AA16 = AA36; AA36 = tmp;
tmp = AA17; AA17 = AA37; AA37 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA10; AA10 = AA40; AA40 = tmp;
tmp = AA11; AA11 = AA41; AA41 = tmp;
tmp = AA12; AA12 = AA42; AA42 = tmp;
tmp = AA13; AA13 = AA43; AA43 = tmp;
tmp = AA14; AA14 = AA44; AA44 = tmp;
tmp = AA15; AA15 = AA45; AA45 = tmp;
tmp = AA16; AA16 = AA46; AA46 = tmp;
tmp = AA17; AA17 = AA47; AA47 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA10; AA10 = AA50; AA50 = tmp;
tmp = AA11; AA11 = AA51; AA51 = tmp;
tmp = AA12; AA12 = AA52; AA52 = tmp;
tmp = AA13; AA13 = AA53; AA53 = tmp;
tmp = AA14; AA14 = AA54; AA54 = tmp;
tmp = AA15; AA15 = AA55; AA55 = tmp;
tmp = AA16; AA16 = AA56; AA56 = tmp;
tmp = AA17; AA17 = AA57; AA57 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA10; AA10 = AA60; AA60 = tmp;
tmp = AA11; AA11 = AA61; AA61 = tmp;
tmp = AA12; AA12 = AA62; AA62 = tmp;
tmp = AA13; AA13 = AA63; AA63 = tmp;
tmp = AA14; AA14 = AA64; AA64 = tmp;
tmp = AA15; AA15 = AA65; AA65 = tmp;
tmp = AA16; AA16 = AA66; AA66 = tmp;
tmp = AA17; AA17 = AA67; AA67 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA10; AA10 = AA70; AA70 = tmp;
tmp = AA11; AA11 = AA71; AA71 = tmp;
tmp = AA12; AA12 = AA72; AA72 = tmp;
tmp = AA13; AA13 = AA73; AA73 = tmp;
tmp = AA14; AA14 = AA74; AA74 = tmp;
tmp = AA15; AA15 = AA75; AA75 = tmp;
tmp = AA16; AA16 = AA76; AA76 = tmp;
tmp = AA17; AA17 = AA77; AA77 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm7; perm7 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA11);
icol1 = perm1;
AA10 = mulOp (tmp, AA10);
AA11 = tmp;
AA12 = mulOp (tmp, AA12);
AA13 = mulOp (tmp, AA13);
AA14 = mulOp (tmp, AA14);
AA15 = mulOp (tmp, AA15);
AA16 = mulOp (tmp, AA16);
AA17 = mulOp (tmp, AA17);
/* eliminate above and below current row */
tmp = AA01;
AA00 = fmnaOp (tmp, AA10, AA00);
AA01 = mulOp (negOp(tmp), AA11);
AA02 = fmnaOp (tmp, AA12, AA02);
AA03 = fmnaOp (tmp, AA13, AA03);
AA04 = fmnaOp (tmp, AA14, AA04);
AA05 = fmnaOp (tmp, AA15, AA05);
AA06 = fmnaOp (tmp, AA16, AA06);
AA07 = fmnaOp (tmp, AA17, AA07);
tmp = AA21;
AA20 = fmnaOp (tmp, AA10, AA20);
AA21 = mulOp (negOp(tmp), AA11);
AA22 = fmnaOp (tmp, AA12, AA22);
AA23 = fmnaOp (tmp, AA13, AA23);
AA24 = fmnaOp (tmp, AA14, AA24);
AA25 = fmnaOp (tmp, AA15, AA25);
AA26 = fmnaOp (tmp, AA16, AA26);
AA27 = fmnaOp (tmp, AA17, AA27);
tmp = AA31;
AA30 = fmnaOp (tmp, AA10, AA30);
AA31 = mulOp (negOp(tmp), AA11);
AA32 = fmnaOp (tmp, AA12, AA32);
AA33 = fmnaOp (tmp, AA13, AA33);
AA34 = fmnaOp (tmp, AA14, AA34);
AA35 = fmnaOp (tmp, AA15, AA35);
AA36 = fmnaOp (tmp, AA16, AA36);
AA37 = fmnaOp (tmp, AA17, AA37);
tmp = AA41;
AA40 = fmnaOp (tmp, AA10, AA40);
AA41 = mulOp (negOp(tmp), AA11);
AA42 = fmnaOp (tmp, AA12, AA42);
AA43 = fmnaOp (tmp, AA13, AA43);
AA44 = fmnaOp (tmp, AA14, AA44);
AA45 = fmnaOp (tmp, AA15, AA45);
AA46 = fmnaOp (tmp, AA16, AA46);
AA47 = fmnaOp (tmp, AA17, AA47);
tmp = AA51;
AA50 = fmnaOp (tmp, AA10, AA50);
AA51 = mulOp (negOp(tmp), AA11);
AA52 = fmnaOp (tmp, AA12, AA52);
AA53 = fmnaOp (tmp, AA13, AA53);
AA54 = fmnaOp (tmp, AA14, AA54);
AA55 = fmnaOp (tmp, AA15, AA55);
AA56 = fmnaOp (tmp, AA16, AA56);
AA57 = fmnaOp (tmp, AA17, AA57);
tmp = AA61;
AA60 = fmnaOp (tmp, AA10, AA60);
AA61 = mulOp (negOp(tmp), AA11);
AA62 = fmnaOp (tmp, AA12, AA62);
AA63 = fmnaOp (tmp, AA13, AA63);
AA64 = fmnaOp (tmp, AA14, AA64);
AA65 = fmnaOp (tmp, AA15, AA65);
AA66 = fmnaOp (tmp, AA16, AA66);
AA67 = fmnaOp (tmp, AA17, AA67);
tmp = AA71;
AA70 = fmnaOp (tmp, AA10, AA70);
AA71 = mulOp (negOp(tmp), AA11);
AA72 = fmnaOp (tmp, AA12, AA72);
AA73 = fmnaOp (tmp, AA13, AA73);
AA74 = fmnaOp (tmp, AA14, AA74);
AA75 = fmnaOp (tmp, AA15, AA75);
AA76 = fmnaOp (tmp, AA16, AA76);
AA77 = fmnaOp (tmp, AA17, AA77);
/****************** iteration 2 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA22);
pvt = 2;
t = absOp (AA32);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA42);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA52);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA62);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA72);
if (t > p) { p = t; pvt = 7; }
/* swap pivot row with row 2 */
if (pvt == 3) {
tmp = AA20; AA20 = AA30; AA30 = tmp;
tmp = AA21; AA21 = AA31; AA31 = tmp;
tmp = AA22; AA22 = AA32; AA32 = tmp;
tmp = AA23; AA23 = AA33; AA33 = tmp;
tmp = AA24; AA24 = AA34; AA34 = tmp;
tmp = AA25; AA25 = AA35; AA35 = tmp;
tmp = AA26; AA26 = AA36; AA36 = tmp;
tmp = AA27; AA27 = AA37; AA37 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA20; AA20 = AA40; AA40 = tmp;
tmp = AA21; AA21 = AA41; AA41 = tmp;
tmp = AA22; AA22 = AA42; AA42 = tmp;
tmp = AA23; AA23 = AA43; AA43 = tmp;
tmp = AA24; AA24 = AA44; AA44 = tmp;
tmp = AA25; AA25 = AA45; AA45 = tmp;
tmp = AA26; AA26 = AA46; AA46 = tmp;
tmp = AA27; AA27 = AA47; AA47 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA20; AA20 = AA50; AA50 = tmp;
tmp = AA21; AA21 = AA51; AA51 = tmp;
tmp = AA22; AA22 = AA52; AA52 = tmp;
tmp = AA23; AA23 = AA53; AA53 = tmp;
tmp = AA24; AA24 = AA54; AA54 = tmp;
tmp = AA25; AA25 = AA55; AA55 = tmp;
tmp = AA26; AA26 = AA56; AA56 = tmp;
tmp = AA27; AA27 = AA57; AA57 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA20; AA20 = AA60; AA60 = tmp;
tmp = AA21; AA21 = AA61; AA61 = tmp;
tmp = AA22; AA22 = AA62; AA62 = tmp;
tmp = AA23; AA23 = AA63; AA63 = tmp;
tmp = AA24; AA24 = AA64; AA64 = tmp;
tmp = AA25; AA25 = AA65; AA65 = tmp;
tmp = AA26; AA26 = AA66; AA66 = tmp;
tmp = AA27; AA27 = AA67; AA67 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA20; AA20 = AA70; AA70 = tmp;
tmp = AA21; AA21 = AA71; AA71 = tmp;
tmp = AA22; AA22 = AA72; AA72 = tmp;
tmp = AA23; AA23 = AA73; AA73 = tmp;
tmp = AA24; AA24 = AA74; AA74 = tmp;
tmp = AA25; AA25 = AA75; AA75 = tmp;
tmp = AA26; AA26 = AA76; AA76 = tmp;
tmp = AA27; AA27 = AA77; AA77 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm7; perm7 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA22);
icol2 = perm2;
AA20 = mulOp (tmp, AA20);
AA21 = mulOp (tmp, AA21);
AA22 = tmp;
AA23 = mulOp (tmp, AA23);
AA24 = mulOp (tmp, AA24);
AA25 = mulOp (tmp, AA25);
AA26 = mulOp (tmp, AA26);
AA27 = mulOp (tmp, AA27);
/* eliminate above and below current row */
tmp = AA02;
AA00 = fmnaOp (tmp, AA20, AA00);
AA01 = fmnaOp (tmp, AA21, AA01);
AA02 = mulOp (negOp(tmp), AA22);
AA03 = fmnaOp (tmp, AA23, AA03);
AA04 = fmnaOp (tmp, AA24, AA04);
AA05 = fmnaOp (tmp, AA25, AA05);
AA06 = fmnaOp (tmp, AA26, AA06);
AA07 = fmnaOp (tmp, AA27, AA07);
tmp = AA12;
AA10 = fmnaOp (tmp, AA20, AA10);
AA11 = fmnaOp (tmp, AA21, AA11);
AA12 = mulOp (negOp(tmp), AA22);
AA13 = fmnaOp (tmp, AA23, AA13);
AA14 = fmnaOp (tmp, AA24, AA14);
AA15 = fmnaOp (tmp, AA25, AA15);
AA16 = fmnaOp (tmp, AA26, AA16);
AA17 = fmnaOp (tmp, AA27, AA17);
tmp = AA32;
AA30 = fmnaOp (tmp, AA20, AA30);
AA31 = fmnaOp (tmp, AA21, AA31);
AA32 = mulOp (negOp(tmp), AA22);
AA33 = fmnaOp (tmp, AA23, AA33);
AA34 = fmnaOp (tmp, AA24, AA34);
AA35 = fmnaOp (tmp, AA25, AA35);
AA36 = fmnaOp (tmp, AA26, AA36);
AA37 = fmnaOp (tmp, AA27, AA37);
tmp = AA42;
AA40 = fmnaOp (tmp, AA20, AA40);
AA41 = fmnaOp (tmp, AA21, AA41);
AA42 = mulOp (negOp(tmp), AA22);
AA43 = fmnaOp (tmp, AA23, AA43);
AA44 = fmnaOp (tmp, AA24, AA44);
AA45 = fmnaOp (tmp, AA25, AA45);
AA46 = fmnaOp (tmp, AA26, AA46);
AA47 = fmnaOp (tmp, AA27, AA47);
tmp = AA52;
AA50 = fmnaOp (tmp, AA20, AA50);
AA51 = fmnaOp (tmp, AA21, AA51);
AA52 = mulOp (negOp(tmp), AA22);
AA53 = fmnaOp (tmp, AA23, AA53);
AA54 = fmnaOp (tmp, AA24, AA54);
AA55 = fmnaOp (tmp, AA25, AA55);
AA56 = fmnaOp (tmp, AA26, AA56);
AA57 = fmnaOp (tmp, AA27, AA57);
tmp = AA62;
AA60 = fmnaOp (tmp, AA20, AA60);
AA61 = fmnaOp (tmp, AA21, AA61);
AA62 = mulOp (negOp(tmp), AA22);
AA63 = fmnaOp (tmp, AA23, AA63);
AA64 = fmnaOp (tmp, AA24, AA64);
AA65 = fmnaOp (tmp, AA25, AA65);
AA66 = fmnaOp (tmp, AA26, AA66);
AA67 = fmnaOp (tmp, AA27, AA67);
tmp = AA72;
AA70 = fmnaOp (tmp, AA20, AA70);
AA71 = fmnaOp (tmp, AA21, AA71);
AA72 = mulOp (negOp(tmp), AA22);
AA73 = fmnaOp (tmp, AA23, AA73);
AA74 = fmnaOp (tmp, AA24, AA74);
AA75 = fmnaOp (tmp, AA25, AA75);
AA76 = fmnaOp (tmp, AA26, AA76);
AA77 = fmnaOp (tmp, AA27, AA77);
/****************** iteration 3 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA33);
pvt = 3;
t = absOp (AA43);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA53);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA63);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA73);
if (t > p) { p = t; pvt = 7; }
/* swap pivot row with row 3 */
if (pvt == 4) {
tmp = AA30; AA30 = AA40; AA40 = tmp;
tmp = AA31; AA31 = AA41; AA41 = tmp;
tmp = AA32; AA32 = AA42; AA42 = tmp;
tmp = AA33; AA33 = AA43; AA43 = tmp;
tmp = AA34; AA34 = AA44; AA44 = tmp;
tmp = AA35; AA35 = AA45; AA45 = tmp;
tmp = AA36; AA36 = AA46; AA46 = tmp;
tmp = AA37; AA37 = AA47; AA47 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA30; AA30 = AA50; AA50 = tmp;
tmp = AA31; AA31 = AA51; AA51 = tmp;
tmp = AA32; AA32 = AA52; AA52 = tmp;
tmp = AA33; AA33 = AA53; AA53 = tmp;
tmp = AA34; AA34 = AA54; AA54 = tmp;
tmp = AA35; AA35 = AA55; AA55 = tmp;
tmp = AA36; AA36 = AA56; AA56 = tmp;
tmp = AA37; AA37 = AA57; AA57 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA30; AA30 = AA60; AA60 = tmp;
tmp = AA31; AA31 = AA61; AA61 = tmp;
tmp = AA32; AA32 = AA62; AA62 = tmp;
tmp = AA33; AA33 = AA63; AA63 = tmp;
tmp = AA34; AA34 = AA64; AA64 = tmp;
tmp = AA35; AA35 = AA65; AA65 = tmp;
tmp = AA36; AA36 = AA66; AA66 = tmp;
tmp = AA37; AA37 = AA67; AA67 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA30; AA30 = AA70; AA70 = tmp;
tmp = AA31; AA31 = AA71; AA71 = tmp;
tmp = AA32; AA32 = AA72; AA72 = tmp;
tmp = AA33; AA33 = AA73; AA73 = tmp;
tmp = AA34; AA34 = AA74; AA74 = tmp;
tmp = AA35; AA35 = AA75; AA75 = tmp;
tmp = AA36; AA36 = AA76; AA76 = tmp;
tmp = AA37; AA37 = AA77; AA77 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm7; perm7 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA33);
icol3 = perm3;
AA30 = mulOp (tmp, AA30);
AA31 = mulOp (tmp, AA31);
AA32 = mulOp (tmp, AA32);
AA33 = tmp;
AA34 = mulOp (tmp, AA34);
AA35 = mulOp (tmp, AA35);
AA36 = mulOp (tmp, AA36);
AA37 = mulOp (tmp, AA37);
/* eliminate above and below current row */
tmp = AA03;
AA00 = fmnaOp (tmp, AA30, AA00);
AA01 = fmnaOp (tmp, AA31, AA01);
AA02 = fmnaOp (tmp, AA32, AA02);
AA03 = mulOp (negOp(tmp), AA33);
AA04 = fmnaOp (tmp, AA34, AA04);
AA05 = fmnaOp (tmp, AA35, AA05);
AA06 = fmnaOp (tmp, AA36, AA06);
AA07 = fmnaOp (tmp, AA37, AA07);
tmp = AA13;
AA10 = fmnaOp (tmp, AA30, AA10);
AA11 = fmnaOp (tmp, AA31, AA11);
AA12 = fmnaOp (tmp, AA32, AA12);
AA13 = mulOp (negOp(tmp), AA33);
AA14 = fmnaOp (tmp, AA34, AA14);
AA15 = fmnaOp (tmp, AA35, AA15);
AA16 = fmnaOp (tmp, AA36, AA16);
AA17 = fmnaOp (tmp, AA37, AA17);
tmp = AA23;
AA20 = fmnaOp (tmp, AA30, AA20);
AA21 = fmnaOp (tmp, AA31, AA21);
AA22 = fmnaOp (tmp, AA32, AA22);
AA23 = mulOp (negOp(tmp), AA33);
AA24 = fmnaOp (tmp, AA34, AA24);
AA25 = fmnaOp (tmp, AA35, AA25);
AA26 = fmnaOp (tmp, AA36, AA26);
AA27 = fmnaOp (tmp, AA37, AA27);
tmp = AA43;
AA40 = fmnaOp (tmp, AA30, AA40);
AA41 = fmnaOp (tmp, AA31, AA41);
AA42 = fmnaOp (tmp, AA32, AA42);
AA43 = mulOp (negOp(tmp), AA33);
AA44 = fmnaOp (tmp, AA34, AA44);
AA45 = fmnaOp (tmp, AA35, AA45);
AA46 = fmnaOp (tmp, AA36, AA46);
AA47 = fmnaOp (tmp, AA37, AA47);
tmp = AA53;
AA50 = fmnaOp (tmp, AA30, AA50);
AA51 = fmnaOp (tmp, AA31, AA51);
AA52 = fmnaOp (tmp, AA32, AA52);
AA53 = mulOp (negOp(tmp), AA33);
AA54 = fmnaOp (tmp, AA34, AA54);
AA55 = fmnaOp (tmp, AA35, AA55);
AA56 = fmnaOp (tmp, AA36, AA56);
AA57 = fmnaOp (tmp, AA37, AA57);
tmp = AA63;
AA60 = fmnaOp (tmp, AA30, AA60);
AA61 = fmnaOp (tmp, AA31, AA61);
AA62 = fmnaOp (tmp, AA32, AA62);
AA63 = mulOp (negOp(tmp), AA33);
AA64 = fmnaOp (tmp, AA34, AA64);
AA65 = fmnaOp (tmp, AA35, AA65);
AA66 = fmnaOp (tmp, AA36, AA66);
AA67 = fmnaOp (tmp, AA37, AA67);
tmp = AA73;
AA70 = fmnaOp (tmp, AA30, AA70);
AA71 = fmnaOp (tmp, AA31, AA71);
AA72 = fmnaOp (tmp, AA32, AA72);
AA73 = mulOp (negOp(tmp), AA33);
AA74 = fmnaOp (tmp, AA34, AA74);
AA75 = fmnaOp (tmp, AA35, AA75);
AA76 = fmnaOp (tmp, AA36, AA76);
AA77 = fmnaOp (tmp, AA37, AA77);
/****************** iteration 4 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA44);
pvt = 4;
t = absOp (AA54);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA64);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA74);
if (t > p) { p = t; pvt = 7; }
/* swap pivot row with row 4 */
if (pvt == 5) {
tmp = AA40; AA40 = AA50; AA50 = tmp;
tmp = AA41; AA41 = AA51; AA51 = tmp;
tmp = AA42; AA42 = AA52; AA52 = tmp;
tmp = AA43; AA43 = AA53; AA53 = tmp;
tmp = AA44; AA44 = AA54; AA54 = tmp;
tmp = AA45; AA45 = AA55; AA55 = tmp;
tmp = AA46; AA46 = AA56; AA56 = tmp;
tmp = AA47; AA47 = AA57; AA57 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA40; AA40 = AA60; AA60 = tmp;
tmp = AA41; AA41 = AA61; AA61 = tmp;
tmp = AA42; AA42 = AA62; AA62 = tmp;
tmp = AA43; AA43 = AA63; AA63 = tmp;
tmp = AA44; AA44 = AA64; AA64 = tmp;
tmp = AA45; AA45 = AA65; AA65 = tmp;
tmp = AA46; AA46 = AA66; AA66 = tmp;
tmp = AA47; AA47 = AA67; AA67 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA40; AA40 = AA70; AA70 = tmp;
tmp = AA41; AA41 = AA71; AA71 = tmp;
tmp = AA42; AA42 = AA72; AA72 = tmp;
tmp = AA43; AA43 = AA73; AA73 = tmp;
tmp = AA44; AA44 = AA74; AA74 = tmp;
tmp = AA45; AA45 = AA75; AA75 = tmp;
tmp = AA46; AA46 = AA76; AA76 = tmp;
tmp = AA47; AA47 = AA77; AA77 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm7; perm7 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA44);
icol4 = perm4;
AA40 = mulOp (tmp, AA40);
AA41 = mulOp (tmp, AA41);
AA42 = mulOp (tmp, AA42);
AA43 = mulOp (tmp, AA43);
AA44 = tmp;
AA45 = mulOp (tmp, AA45);
AA46 = mulOp (tmp, AA46);
AA47 = mulOp (tmp, AA47);
/* eliminate above and below current row */
tmp = AA04;
AA00 = fmnaOp (tmp, AA40, AA00);
AA01 = fmnaOp (tmp, AA41, AA01);
AA02 = fmnaOp (tmp, AA42, AA02);
AA03 = fmnaOp (tmp, AA43, AA03);
AA04 = mulOp (negOp(tmp), AA44);
AA05 = fmnaOp (tmp, AA45, AA05);
AA06 = fmnaOp (tmp, AA46, AA06);
AA07 = fmnaOp (tmp, AA47, AA07);
tmp = AA14;
AA10 = fmnaOp (tmp, AA40, AA10);
AA11 = fmnaOp (tmp, AA41, AA11);
AA12 = fmnaOp (tmp, AA42, AA12);
AA13 = fmnaOp (tmp, AA43, AA13);
AA14 = mulOp (negOp(tmp), AA44);
AA15 = fmnaOp (tmp, AA45, AA15);
AA16 = fmnaOp (tmp, AA46, AA16);
AA17 = fmnaOp (tmp, AA47, AA17);
tmp = AA24;
AA20 = fmnaOp (tmp, AA40, AA20);
AA21 = fmnaOp (tmp, AA41, AA21);
AA22 = fmnaOp (tmp, AA42, AA22);
AA23 = fmnaOp (tmp, AA43, AA23);
AA24 = mulOp (negOp(tmp), AA44);
AA25 = fmnaOp (tmp, AA45, AA25);
AA26 = fmnaOp (tmp, AA46, AA26);
AA27 = fmnaOp (tmp, AA47, AA27);
tmp = AA34;
AA30 = fmnaOp (tmp, AA40, AA30);
AA31 = fmnaOp (tmp, AA41, AA31);
AA32 = fmnaOp (tmp, AA42, AA32);
AA33 = fmnaOp (tmp, AA43, AA33);
AA34 = mulOp (negOp(tmp), AA44);
AA35 = fmnaOp (tmp, AA45, AA35);
AA36 = fmnaOp (tmp, AA46, AA36);
AA37 = fmnaOp (tmp, AA47, AA37);
tmp = AA54;
AA50 = fmnaOp (tmp, AA40, AA50);
AA51 = fmnaOp (tmp, AA41, AA51);
AA52 = fmnaOp (tmp, AA42, AA52);
AA53 = fmnaOp (tmp, AA43, AA53);
AA54 = mulOp (negOp(tmp), AA44);
AA55 = fmnaOp (tmp, AA45, AA55);
AA56 = fmnaOp (tmp, AA46, AA56);
AA57 = fmnaOp (tmp, AA47, AA57);
tmp = AA64;
AA60 = fmnaOp (tmp, AA40, AA60);
AA61 = fmnaOp (tmp, AA41, AA61);
AA62 = fmnaOp (tmp, AA42, AA62);
AA63 = fmnaOp (tmp, AA43, AA63);
AA64 = mulOp (negOp(tmp), AA44);
AA65 = fmnaOp (tmp, AA45, AA65);
AA66 = fmnaOp (tmp, AA46, AA66);
AA67 = fmnaOp (tmp, AA47, AA67);
tmp = AA74;
AA70 = fmnaOp (tmp, AA40, AA70);
AA71 = fmnaOp (tmp, AA41, AA71);
AA72 = fmnaOp (tmp, AA42, AA72);
AA73 = fmnaOp (tmp, AA43, AA73);
AA74 = mulOp (negOp(tmp), AA44);
AA75 = fmnaOp (tmp, AA45, AA75);
AA76 = fmnaOp (tmp, AA46, AA76);
AA77 = fmnaOp (tmp, AA47, AA77);
/****************** iteration 5 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA55);
pvt = 5;
t = absOp (AA65);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA75);
if (t > p) { p = t; pvt = 7; }
/* swap pivot row with row 5 */
if (pvt == 6) {
tmp = AA50; AA50 = AA60; AA60 = tmp;
tmp = AA51; AA51 = AA61; AA61 = tmp;
tmp = AA52; AA52 = AA62; AA62 = tmp;
tmp = AA53; AA53 = AA63; AA63 = tmp;
tmp = AA54; AA54 = AA64; AA64 = tmp;
tmp = AA55; AA55 = AA65; AA65 = tmp;
tmp = AA56; AA56 = AA66; AA66 = tmp;
tmp = AA57; AA57 = AA67; AA67 = tmp;
/* update permutation vector based on row swap */
i = perm5; perm5 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA50; AA50 = AA70; AA70 = tmp;
tmp = AA51; AA51 = AA71; AA71 = tmp;
tmp = AA52; AA52 = AA72; AA72 = tmp;
tmp = AA53; AA53 = AA73; AA73 = tmp;
tmp = AA54; AA54 = AA74; AA74 = tmp;
tmp = AA55; AA55 = AA75; AA75 = tmp;
tmp = AA56; AA56 = AA76; AA76 = tmp;
tmp = AA57; AA57 = AA77; AA77 = tmp;
/* update permutation vector based on row swap */
i = perm5; perm5 = perm7; perm7 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA55);
icol5 = perm5;
AA50 = mulOp (tmp, AA50);
AA51 = mulOp (tmp, AA51);
AA52 = mulOp (tmp, AA52);
AA53 = mulOp (tmp, AA53);
AA54 = mulOp (tmp, AA54);
AA55 = tmp;
AA56 = mulOp (tmp, AA56);
AA57 = mulOp (tmp, AA57);
/* eliminate above and below current row */
tmp = AA05;
AA00 = fmnaOp (tmp, AA50, AA00);
AA01 = fmnaOp (tmp, AA51, AA01);
AA02 = fmnaOp (tmp, AA52, AA02);
AA03 = fmnaOp (tmp, AA53, AA03);
AA04 = fmnaOp (tmp, AA54, AA04);
AA05 = mulOp (negOp(tmp), AA55);
AA06 = fmnaOp (tmp, AA56, AA06);
AA07 = fmnaOp (tmp, AA57, AA07);
tmp = AA15;
AA10 = fmnaOp (tmp, AA50, AA10);
AA11 = fmnaOp (tmp, AA51, AA11);
AA12 = fmnaOp (tmp, AA52, AA12);
AA13 = fmnaOp (tmp, AA53, AA13);
AA14 = fmnaOp (tmp, AA54, AA14);
AA15 = mulOp (negOp(tmp), AA55);
AA16 = fmnaOp (tmp, AA56, AA16);
AA17 = fmnaOp (tmp, AA57, AA17);
tmp = AA25;
AA20 = fmnaOp (tmp, AA50, AA20);
AA21 = fmnaOp (tmp, AA51, AA21);
AA22 = fmnaOp (tmp, AA52, AA22);
AA23 = fmnaOp (tmp, AA53, AA23);
AA24 = fmnaOp (tmp, AA54, AA24);
AA25 = mulOp (negOp(tmp), AA55);
AA26 = fmnaOp (tmp, AA56, AA26);
AA27 = fmnaOp (tmp, AA57, AA27);
tmp = AA35;
AA30 = fmnaOp (tmp, AA50, AA30);
AA31 = fmnaOp (tmp, AA51, AA31);
AA32 = fmnaOp (tmp, AA52, AA32);
AA33 = fmnaOp (tmp, AA53, AA33);
AA34 = fmnaOp (tmp, AA54, AA34);
AA35 = mulOp (negOp(tmp), AA55);
AA36 = fmnaOp (tmp, AA56, AA36);
AA37 = fmnaOp (tmp, AA57, AA37);
tmp = AA45;
AA40 = fmnaOp (tmp, AA50, AA40);
AA41 = fmnaOp (tmp, AA51, AA41);
AA42 = fmnaOp (tmp, AA52, AA42);
AA43 = fmnaOp (tmp, AA53, AA43);
AA44 = fmnaOp (tmp, AA54, AA44);
AA45 = mulOp (negOp(tmp), AA55);
AA46 = fmnaOp (tmp, AA56, AA46);
AA47 = fmnaOp (tmp, AA57, AA47);
tmp = AA65;
AA60 = fmnaOp (tmp, AA50, AA60);
AA61 = fmnaOp (tmp, AA51, AA61);
AA62 = fmnaOp (tmp, AA52, AA62);
AA63 = fmnaOp (tmp, AA53, AA63);
AA64 = fmnaOp (tmp, AA54, AA64);
AA65 = mulOp (negOp(tmp), AA55);
AA66 = fmnaOp (tmp, AA56, AA66);
AA67 = fmnaOp (tmp, AA57, AA67);
tmp = AA75;
AA70 = fmnaOp (tmp, AA50, AA70);
AA71 = fmnaOp (tmp, AA51, AA71);
AA72 = fmnaOp (tmp, AA52, AA72);
AA73 = fmnaOp (tmp, AA53, AA73);
AA74 = fmnaOp (tmp, AA54, AA74);
AA75 = mulOp (negOp(tmp), AA55);
AA76 = fmnaOp (tmp, AA56, AA76);
AA77 = fmnaOp (tmp, AA57, AA77);
/****************** iteration 6 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA66);
pvt = 6;
t = absOp (AA76);
if (t > p) { p = t; pvt = 7; }
/* swap pivot row with row 6 */
if (pvt == 7) {
tmp = AA60; AA60 = AA70; AA70 = tmp;
tmp = AA61; AA61 = AA71; AA71 = tmp;
tmp = AA62; AA62 = AA72; AA72 = tmp;
tmp = AA63; AA63 = AA73; AA73 = tmp;
tmp = AA64; AA64 = AA74; AA74 = tmp;
tmp = AA65; AA65 = AA75; AA75 = tmp;
tmp = AA66; AA66 = AA76; AA76 = tmp;
tmp = AA67; AA67 = AA77; AA77 = tmp;
/* update permutation vector based on row swap */
i = perm6; perm6 = perm7; perm7 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA66);
icol6 = perm6;
AA60 = mulOp (tmp, AA60);
AA61 = mulOp (tmp, AA61);
AA62 = mulOp (tmp, AA62);
AA63 = mulOp (tmp, AA63);
AA64 = mulOp (tmp, AA64);
AA65 = mulOp (tmp, AA65);
AA66 = tmp;
AA67 = mulOp (tmp, AA67);
/* eliminate above and below current row */
tmp = AA06;
AA00 = fmnaOp (tmp, AA60, AA00);
AA01 = fmnaOp (tmp, AA61, AA01);
AA02 = fmnaOp (tmp, AA62, AA02);
AA03 = fmnaOp (tmp, AA63, AA03);
AA04 = fmnaOp (tmp, AA64, AA04);
AA05 = fmnaOp (tmp, AA65, AA05);
AA06 = mulOp (negOp(tmp), AA66);
AA07 = fmnaOp (tmp, AA67, AA07);
tmp = AA16;
AA10 = fmnaOp (tmp, AA60, AA10);
AA11 = fmnaOp (tmp, AA61, AA11);
AA12 = fmnaOp (tmp, AA62, AA12);
AA13 = fmnaOp (tmp, AA63, AA13);
AA14 = fmnaOp (tmp, AA64, AA14);
AA15 = fmnaOp (tmp, AA65, AA15);
AA16 = mulOp (negOp(tmp), AA66);
AA17 = fmnaOp (tmp, AA67, AA17);
tmp = AA26;
AA20 = fmnaOp (tmp, AA60, AA20);
AA21 = fmnaOp (tmp, AA61, AA21);
AA22 = fmnaOp (tmp, AA62, AA22);
AA23 = fmnaOp (tmp, AA63, AA23);
AA24 = fmnaOp (tmp, AA64, AA24);
AA25 = fmnaOp (tmp, AA65, AA25);
AA26 = mulOp (negOp(tmp), AA66);
AA27 = fmnaOp (tmp, AA67, AA27);
tmp = AA36;
AA30 = fmnaOp (tmp, AA60, AA30);
AA31 = fmnaOp (tmp, AA61, AA31);
AA32 = fmnaOp (tmp, AA62, AA32);
AA33 = fmnaOp (tmp, AA63, AA33);
AA34 = fmnaOp (tmp, AA64, AA34);
AA35 = fmnaOp (tmp, AA65, AA35);
AA36 = mulOp (negOp(tmp), AA66);
AA37 = fmnaOp (tmp, AA67, AA37);
tmp = AA46;
AA40 = fmnaOp (tmp, AA60, AA40);
AA41 = fmnaOp (tmp, AA61, AA41);
AA42 = fmnaOp (tmp, AA62, AA42);
AA43 = fmnaOp (tmp, AA63, AA43);
AA44 = fmnaOp (tmp, AA64, AA44);
AA45 = fmnaOp (tmp, AA65, AA45);
AA46 = mulOp (negOp(tmp), AA66);
AA47 = fmnaOp (tmp, AA67, AA47);
tmp = AA56;
AA50 = fmnaOp (tmp, AA60, AA50);
AA51 = fmnaOp (tmp, AA61, AA51);
AA52 = fmnaOp (tmp, AA62, AA52);
AA53 = fmnaOp (tmp, AA63, AA53);
AA54 = fmnaOp (tmp, AA64, AA54);
AA55 = fmnaOp (tmp, AA65, AA55);
AA56 = mulOp (negOp(tmp), AA66);
AA57 = fmnaOp (tmp, AA67, AA57);
tmp = AA76;
AA70 = fmnaOp (tmp, AA60, AA70);
AA71 = fmnaOp (tmp, AA61, AA71);
AA72 = fmnaOp (tmp, AA62, AA72);
AA73 = fmnaOp (tmp, AA63, AA73);
AA74 = fmnaOp (tmp, AA64, AA74);
AA75 = fmnaOp (tmp, AA65, AA75);
AA76 = mulOp (negOp(tmp), AA66);
AA77 = fmnaOp (tmp, AA67, AA77);
/****************** iteration 7 ****************/
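        /* note: no pivot search here even when USE_PIVOTING is set; row 7 is the
           last row of the 8x8 system, so there is no row below it to swap with */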
/* scale current row */
tmp = rcpOp (AA77);
icol7 = perm7;
AA70 = mulOp (tmp, AA70);
AA71 = mulOp (tmp, AA71);
AA72 = mulOp (tmp, AA72);
AA73 = mulOp (tmp, AA73);
AA74 = mulOp (tmp, AA74);
AA75 = mulOp (tmp, AA75);
AA76 = mulOp (tmp, AA76);
AA77 = tmp;
/* eliminate above and below current row */
tmp = AA07;
AA00 = fmnaOp (tmp, AA70, AA00);
AA01 = fmnaOp (tmp, AA71, AA01);
AA02 = fmnaOp (tmp, AA72, AA02);
AA03 = fmnaOp (tmp, AA73, AA03);
AA04 = fmnaOp (tmp, AA74, AA04);
AA05 = fmnaOp (tmp, AA75, AA05);
AA06 = fmnaOp (tmp, AA76, AA06);
AA07 = mulOp (negOp(tmp), AA77);
tmp = AA17;
AA10 = fmnaOp (tmp, AA70, AA10);
AA11 = fmnaOp (tmp, AA71, AA11);
AA12 = fmnaOp (tmp, AA72, AA12);
AA13 = fmnaOp (tmp, AA73, AA13);
AA14 = fmnaOp (tmp, AA74, AA14);
AA15 = fmnaOp (tmp, AA75, AA15);
AA16 = fmnaOp (tmp, AA76, AA16);
AA17 = mulOp (negOp(tmp), AA77);
tmp = AA27;
AA20 = fmnaOp (tmp, AA70, AA20);
AA21 = fmnaOp (tmp, AA71, AA21);
AA22 = fmnaOp (tmp, AA72, AA22);
AA23 = fmnaOp (tmp, AA73, AA23);
AA24 = fmnaOp (tmp, AA74, AA24);
AA25 = fmnaOp (tmp, AA75, AA25);
AA26 = fmnaOp (tmp, AA76, AA26);
AA27 = mulOp (negOp(tmp), AA77);
tmp = AA37;
AA30 = fmnaOp (tmp, AA70, AA30);
AA31 = fmnaOp (tmp, AA71, AA31);
AA32 = fmnaOp (tmp, AA72, AA32);
AA33 = fmnaOp (tmp, AA73, AA33);
AA34 = fmnaOp (tmp, AA74, AA34);
AA35 = fmnaOp (tmp, AA75, AA35);
AA36 = fmnaOp (tmp, AA76, AA36);
AA37 = mulOp (negOp(tmp), AA77);
tmp = AA47;
AA40 = fmnaOp (tmp, AA70, AA40);
AA41 = fmnaOp (tmp, AA71, AA41);
AA42 = fmnaOp (tmp, AA72, AA42);
AA43 = fmnaOp (tmp, AA73, AA43);
AA44 = fmnaOp (tmp, AA74, AA44);
AA45 = fmnaOp (tmp, AA75, AA45);
AA46 = fmnaOp (tmp, AA76, AA46);
AA47 = mulOp (negOp(tmp), AA77);
tmp = AA57;
AA50 = fmnaOp (tmp, AA70, AA50);
AA51 = fmnaOp (tmp, AA71, AA51);
AA52 = fmnaOp (tmp, AA72, AA52);
AA53 = fmnaOp (tmp, AA73, AA53);
AA54 = fmnaOp (tmp, AA74, AA54);
AA55 = fmnaOp (tmp, AA75, AA55);
AA56 = fmnaOp (tmp, AA76, AA56);
AA57 = mulOp (negOp(tmp), AA77);
tmp = AA67;
AA60 = fmnaOp (tmp, AA70, AA60);
AA61 = fmnaOp (tmp, AA71, AA61);
AA62 = fmnaOp (tmp, AA72, AA62);
AA63 = fmnaOp (tmp, AA73, AA63);
AA64 = fmnaOp (tmp, AA74, AA64);
AA65 = fmnaOp (tmp, AA75, AA65);
AA66 = fmnaOp (tmp, AA76, AA66);
AA67 = mulOp (negOp(tmp), AA77);
/* sort columns into the correct order */
Ainv(0,icol0) = AA00;
Ainv(1,icol0) = AA10;
Ainv(2,icol0) = AA20;
Ainv(3,icol0) = AA30;
Ainv(4,icol0) = AA40;
Ainv(5,icol0) = AA50;
Ainv(6,icol0) = AA60;
Ainv(7,icol0) = AA70;
Ainv(0,icol1) = AA01;
Ainv(1,icol1) = AA11;
Ainv(2,icol1) = AA21;
Ainv(3,icol1) = AA31;
Ainv(4,icol1) = AA41;
Ainv(5,icol1) = AA51;
Ainv(6,icol1) = AA61;
Ainv(7,icol1) = AA71;
Ainv(0,icol2) = AA02;
Ainv(1,icol2) = AA12;
Ainv(2,icol2) = AA22;
Ainv(3,icol2) = AA32;
Ainv(4,icol2) = AA42;
Ainv(5,icol2) = AA52;
Ainv(6,icol2) = AA62;
Ainv(7,icol2) = AA72;
Ainv(0,icol3) = AA03;
Ainv(1,icol3) = AA13;
Ainv(2,icol3) = AA23;
Ainv(3,icol3) = AA33;
Ainv(4,icol3) = AA43;
Ainv(5,icol3) = AA53;
Ainv(6,icol3) = AA63;
Ainv(7,icol3) = AA73;
Ainv(0,icol4) = AA04;
Ainv(1,icol4) = AA14;
Ainv(2,icol4) = AA24;
Ainv(3,icol4) = AA34;
Ainv(4,icol4) = AA44;
Ainv(5,icol4) = AA54;
Ainv(6,icol4) = AA64;
Ainv(7,icol4) = AA74;
Ainv(0,icol5) = AA05;
Ainv(1,icol5) = AA15;
Ainv(2,icol5) = AA25;
Ainv(3,icol5) = AA35;
Ainv(4,icol5) = AA45;
Ainv(5,icol5) = AA55;
Ainv(6,icol5) = AA65;
Ainv(7,icol5) = AA75;
Ainv(0,icol6) = AA06;
Ainv(1,icol6) = AA16;
Ainv(2,icol6) = AA26;
Ainv(3,icol6) = AA36;
Ainv(4,icol6) = AA46;
Ainv(5,icol6) = AA56;
Ainv(6,icol6) = AA66;
Ainv(7,icol6) = AA76;
Ainv(0,icol7) = AA07;
Ainv(1,icol7) = AA17;
Ainv(2,icol7) = AA27;
Ainv(3,icol7) = AA37;
Ainv(4,icol7) = AA47;
Ainv(5,icol7) = AA57;
Ainv(6,icol7) = AA67;
Ainv(7,icol7) = AA77;
}
}
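
/*
 * All of the matinv_NxN_matrix_per_thread kernels in this file follow the same
 * fully unrolled Gauss-Jordan scheme: each thread loads one N x N column-major
 * matrix from A into registers (AAij holds the element in row i, column j),
 * then for every pivot k it optionally searches the remaining rows for the
 * largest pivot (USE_PIVOTING), scales the pivot row by rcpOp(AAkk), and
 * eliminates the pivot column above and below the pivot with fmnaOp. Writing
 * mulOp(negOp(tmp), AAkk) and the reciprocal back into the pivot column is what
 * accumulates the inverse in place of the identity. Row swaps are tracked in
 * perm0..permN-1, and the final block of assignments undoes the permutation by
 * scattering each register column into Ainv(row, icolk).
 */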
template<typename T, int arch>
__global__ void matinv_9x9_matrix_per_thread (const T *A, T *Ainv, int batch)
{
const int blkNum = blockIdx.y * gridDim.x + blockIdx.x;
const int thrdNum = blkNum * blockDim.x + threadIdx.x;
const int N = 9;
int perm0, perm1, perm2, perm3, perm4, perm5, perm6, perm7, perm8;
int icol0, icol1, icol2, icol3, icol4, icol5, icol6, icol7, icol8;
T AA00, AA01, AA02, AA03, AA04, AA05, AA06, AA07, AA08;
T AA10, AA11, AA12, AA13, AA14, AA15, AA16, AA17, AA18;
T AA20, AA21, AA22, AA23, AA24, AA25, AA26, AA27, AA28;
T AA30, AA31, AA32, AA33, AA34, AA35, AA36, AA37, AA38;
T AA40, AA41, AA42, AA43, AA44, AA45, AA46, AA47, AA48;
T AA50, AA51, AA52, AA53, AA54, AA55, AA56, AA57, AA58;
T AA60, AA61, AA62, AA63, AA64, AA65, AA66, AA67, AA68;
T AA70, AA71, AA72, AA73, AA74, AA75, AA76, AA77, AA78;
T AA80, AA81, AA82, AA83, AA84, AA85, AA86, AA87, AA88;
T tmp;
#if USE_PIVOTING
typename config<T,arch>::absValType t;
typename config<T,arch>::absValType p;
int i, pvt;
#endif
A += thrdNum * N * N;
Ainv += thrdNum * N * N;
if (thrdNum < batch) {
AA00 = A[0];
AA10 = A[1];
AA20 = A[2];
AA30 = A[3];
AA40 = A[4];
AA50 = A[5];
AA60 = A[6];
AA70 = A[7];
AA80 = A[8];
AA01 = A[9];
AA11 = A[10];
AA21 = A[11];
AA31 = A[12];
AA41 = A[13];
AA51 = A[14];
AA61 = A[15];
AA71 = A[16];
AA81 = A[17];
AA02 = A[18];
AA12 = A[19];
AA22 = A[20];
AA32 = A[21];
AA42 = A[22];
AA52 = A[23];
AA62 = A[24];
AA72 = A[25];
AA82 = A[26];
AA03 = A[27];
AA13 = A[28];
AA23 = A[29];
AA33 = A[30];
AA43 = A[31];
AA53 = A[32];
AA63 = A[33];
AA73 = A[34];
AA83 = A[35];
AA04 = A[36];
AA14 = A[37];
AA24 = A[38];
AA34 = A[39];
AA44 = A[40];
AA54 = A[41];
AA64 = A[42];
AA74 = A[43];
AA84 = A[44];
AA05 = A[45];
AA15 = A[46];
AA25 = A[47];
AA35 = A[48];
AA45 = A[49];
AA55 = A[50];
AA65 = A[51];
AA75 = A[52];
AA85 = A[53];
AA06 = A[54];
AA16 = A[55];
AA26 = A[56];
AA36 = A[57];
AA46 = A[58];
AA56 = A[59];
AA66 = A[60];
AA76 = A[61];
AA86 = A[62];
AA07 = A[63];
AA17 = A[64];
AA27 = A[65];
AA37 = A[66];
AA47 = A[67];
AA57 = A[68];
AA67 = A[69];
AA77 = A[70];
AA87 = A[71];
AA08 = A[72];
AA18 = A[73];
AA28 = A[74];
AA38 = A[75];
AA48 = A[76];
AA58 = A[77];
AA68 = A[78];
AA78 = A[79];
AA88 = A[80];
perm0 = 0;
perm1 = 1;
perm2 = 2;
perm3 = 3;
perm4 = 4;
perm5 = 5;
perm6 = 6;
perm7 = 7;
perm8 = 8;
/****************** iteration 0 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA00);
pvt = 0;
t = absOp (AA10);
if (t > p) { p = t; pvt = 1; }
t = absOp (AA20);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA30);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA40);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA50);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA60);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA70);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA80);
if (t > p) { p = t; pvt = 8; }
/* swap pivot row with row 0 */
if (pvt == 1) {
tmp = AA00; AA00 = AA10; AA10 = tmp;
tmp = AA01; AA01 = AA11; AA11 = tmp;
tmp = AA02; AA02 = AA12; AA12 = tmp;
tmp = AA03; AA03 = AA13; AA13 = tmp;
tmp = AA04; AA04 = AA14; AA14 = tmp;
tmp = AA05; AA05 = AA15; AA15 = tmp;
tmp = AA06; AA06 = AA16; AA16 = tmp;
tmp = AA07; AA07 = AA17; AA17 = tmp;
tmp = AA08; AA08 = AA18; AA18 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm1; perm1 = i;
}
if (pvt == 2) {
tmp = AA00; AA00 = AA20; AA20 = tmp;
tmp = AA01; AA01 = AA21; AA21 = tmp;
tmp = AA02; AA02 = AA22; AA22 = tmp;
tmp = AA03; AA03 = AA23; AA23 = tmp;
tmp = AA04; AA04 = AA24; AA24 = tmp;
tmp = AA05; AA05 = AA25; AA25 = tmp;
tmp = AA06; AA06 = AA26; AA26 = tmp;
tmp = AA07; AA07 = AA27; AA27 = tmp;
tmp = AA08; AA08 = AA28; AA28 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA00; AA00 = AA30; AA30 = tmp;
tmp = AA01; AA01 = AA31; AA31 = tmp;
tmp = AA02; AA02 = AA32; AA32 = tmp;
tmp = AA03; AA03 = AA33; AA33 = tmp;
tmp = AA04; AA04 = AA34; AA34 = tmp;
tmp = AA05; AA05 = AA35; AA35 = tmp;
tmp = AA06; AA06 = AA36; AA36 = tmp;
tmp = AA07; AA07 = AA37; AA37 = tmp;
tmp = AA08; AA08 = AA38; AA38 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA00; AA00 = AA40; AA40 = tmp;
tmp = AA01; AA01 = AA41; AA41 = tmp;
tmp = AA02; AA02 = AA42; AA42 = tmp;
tmp = AA03; AA03 = AA43; AA43 = tmp;
tmp = AA04; AA04 = AA44; AA44 = tmp;
tmp = AA05; AA05 = AA45; AA45 = tmp;
tmp = AA06; AA06 = AA46; AA46 = tmp;
tmp = AA07; AA07 = AA47; AA47 = tmp;
tmp = AA08; AA08 = AA48; AA48 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA00; AA00 = AA50; AA50 = tmp;
tmp = AA01; AA01 = AA51; AA51 = tmp;
tmp = AA02; AA02 = AA52; AA52 = tmp;
tmp = AA03; AA03 = AA53; AA53 = tmp;
tmp = AA04; AA04 = AA54; AA54 = tmp;
tmp = AA05; AA05 = AA55; AA55 = tmp;
tmp = AA06; AA06 = AA56; AA56 = tmp;
tmp = AA07; AA07 = AA57; AA57 = tmp;
tmp = AA08; AA08 = AA58; AA58 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA00; AA00 = AA60; AA60 = tmp;
tmp = AA01; AA01 = AA61; AA61 = tmp;
tmp = AA02; AA02 = AA62; AA62 = tmp;
tmp = AA03; AA03 = AA63; AA63 = tmp;
tmp = AA04; AA04 = AA64; AA64 = tmp;
tmp = AA05; AA05 = AA65; AA65 = tmp;
tmp = AA06; AA06 = AA66; AA66 = tmp;
tmp = AA07; AA07 = AA67; AA67 = tmp;
tmp = AA08; AA08 = AA68; AA68 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA00; AA00 = AA70; AA70 = tmp;
tmp = AA01; AA01 = AA71; AA71 = tmp;
tmp = AA02; AA02 = AA72; AA72 = tmp;
tmp = AA03; AA03 = AA73; AA73 = tmp;
tmp = AA04; AA04 = AA74; AA74 = tmp;
tmp = AA05; AA05 = AA75; AA75 = tmp;
tmp = AA06; AA06 = AA76; AA76 = tmp;
tmp = AA07; AA07 = AA77; AA77 = tmp;
tmp = AA08; AA08 = AA78; AA78 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA00; AA00 = AA80; AA80 = tmp;
tmp = AA01; AA01 = AA81; AA81 = tmp;
tmp = AA02; AA02 = AA82; AA82 = tmp;
tmp = AA03; AA03 = AA83; AA83 = tmp;
tmp = AA04; AA04 = AA84; AA84 = tmp;
tmp = AA05; AA05 = AA85; AA85 = tmp;
tmp = AA06; AA06 = AA86; AA86 = tmp;
tmp = AA07; AA07 = AA87; AA87 = tmp;
tmp = AA08; AA08 = AA88; AA88 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm8; perm8 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA00);
icol0 = perm0;
AA00 = tmp;
AA01 = mulOp (tmp, AA01);
AA02 = mulOp (tmp, AA02);
AA03 = mulOp (tmp, AA03);
AA04 = mulOp (tmp, AA04);
AA05 = mulOp (tmp, AA05);
AA06 = mulOp (tmp, AA06);
AA07 = mulOp (tmp, AA07);
AA08 = mulOp (tmp, AA08);
/* eliminate above and below current row */
tmp = AA10;
AA10 = mulOp (negOp(tmp), AA00);
AA11 = fmnaOp (tmp, AA01, AA11);
AA12 = fmnaOp (tmp, AA02, AA12);
AA13 = fmnaOp (tmp, AA03, AA13);
AA14 = fmnaOp (tmp, AA04, AA14);
AA15 = fmnaOp (tmp, AA05, AA15);
AA16 = fmnaOp (tmp, AA06, AA16);
AA17 = fmnaOp (tmp, AA07, AA17);
AA18 = fmnaOp (tmp, AA08, AA18);
tmp = AA20;
AA20 = mulOp (negOp(tmp), AA00);
AA21 = fmnaOp (tmp, AA01, AA21);
AA22 = fmnaOp (tmp, AA02, AA22);
AA23 = fmnaOp (tmp, AA03, AA23);
AA24 = fmnaOp (tmp, AA04, AA24);
AA25 = fmnaOp (tmp, AA05, AA25);
AA26 = fmnaOp (tmp, AA06, AA26);
AA27 = fmnaOp (tmp, AA07, AA27);
AA28 = fmnaOp (tmp, AA08, AA28);
tmp = AA30;
AA30 = mulOp (negOp(tmp), AA00);
AA31 = fmnaOp (tmp, AA01, AA31);
AA32 = fmnaOp (tmp, AA02, AA32);
AA33 = fmnaOp (tmp, AA03, AA33);
AA34 = fmnaOp (tmp, AA04, AA34);
AA35 = fmnaOp (tmp, AA05, AA35);
AA36 = fmnaOp (tmp, AA06, AA36);
AA37 = fmnaOp (tmp, AA07, AA37);
AA38 = fmnaOp (tmp, AA08, AA38);
tmp = AA40;
AA40 = mulOp (negOp(tmp), AA00);
AA41 = fmnaOp (tmp, AA01, AA41);
AA42 = fmnaOp (tmp, AA02, AA42);
AA43 = fmnaOp (tmp, AA03, AA43);
AA44 = fmnaOp (tmp, AA04, AA44);
AA45 = fmnaOp (tmp, AA05, AA45);
AA46 = fmnaOp (tmp, AA06, AA46);
AA47 = fmnaOp (tmp, AA07, AA47);
AA48 = fmnaOp (tmp, AA08, AA48);
tmp = AA50;
AA50 = mulOp (negOp(tmp), AA00);
AA51 = fmnaOp (tmp, AA01, AA51);
AA52 = fmnaOp (tmp, AA02, AA52);
AA53 = fmnaOp (tmp, AA03, AA53);
AA54 = fmnaOp (tmp, AA04, AA54);
AA55 = fmnaOp (tmp, AA05, AA55);
AA56 = fmnaOp (tmp, AA06, AA56);
AA57 = fmnaOp (tmp, AA07, AA57);
AA58 = fmnaOp (tmp, AA08, AA58);
tmp = AA60;
AA60 = mulOp (negOp(tmp), AA00);
AA61 = fmnaOp (tmp, AA01, AA61);
AA62 = fmnaOp (tmp, AA02, AA62);
AA63 = fmnaOp (tmp, AA03, AA63);
AA64 = fmnaOp (tmp, AA04, AA64);
AA65 = fmnaOp (tmp, AA05, AA65);
AA66 = fmnaOp (tmp, AA06, AA66);
AA67 = fmnaOp (tmp, AA07, AA67);
AA68 = fmnaOp (tmp, AA08, AA68);
tmp = AA70;
AA70 = mulOp (negOp(tmp), AA00);
AA71 = fmnaOp (tmp, AA01, AA71);
AA72 = fmnaOp (tmp, AA02, AA72);
AA73 = fmnaOp (tmp, AA03, AA73);
AA74 = fmnaOp (tmp, AA04, AA74);
AA75 = fmnaOp (tmp, AA05, AA75);
AA76 = fmnaOp (tmp, AA06, AA76);
AA77 = fmnaOp (tmp, AA07, AA77);
AA78 = fmnaOp (tmp, AA08, AA78);
tmp = AA80;
AA80 = mulOp (negOp(tmp), AA00);
AA81 = fmnaOp (tmp, AA01, AA81);
AA82 = fmnaOp (tmp, AA02, AA82);
AA83 = fmnaOp (tmp, AA03, AA83);
AA84 = fmnaOp (tmp, AA04, AA84);
AA85 = fmnaOp (tmp, AA05, AA85);
AA86 = fmnaOp (tmp, AA06, AA86);
AA87 = fmnaOp (tmp, AA07, AA87);
AA88 = fmnaOp (tmp, AA08, AA88);
/****************** iteration 1 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA11);
pvt = 1;
t = absOp (AA21);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA31);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA41);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA51);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA61);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA71);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA81);
if (t > p) { p = t; pvt = 8; }
/* swap pivot row with row 1 */
if (pvt == 2) {
tmp = AA10; AA10 = AA20; AA20 = tmp;
tmp = AA11; AA11 = AA21; AA21 = tmp;
tmp = AA12; AA12 = AA22; AA22 = tmp;
tmp = AA13; AA13 = AA23; AA23 = tmp;
tmp = AA14; AA14 = AA24; AA24 = tmp;
tmp = AA15; AA15 = AA25; AA25 = tmp;
tmp = AA16; AA16 = AA26; AA26 = tmp;
tmp = AA17; AA17 = AA27; AA27 = tmp;
tmp = AA18; AA18 = AA28; AA28 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA10; AA10 = AA30; AA30 = tmp;
tmp = AA11; AA11 = AA31; AA31 = tmp;
tmp = AA12; AA12 = AA32; AA32 = tmp;
tmp = AA13; AA13 = AA33; AA33 = tmp;
tmp = AA14; AA14 = AA34; AA34 = tmp;
tmp = AA15; AA15 = AA35; AA35 = tmp;
tmp = AA16; AA16 = AA36; AA36 = tmp;
tmp = AA17; AA17 = AA37; AA37 = tmp;
tmp = AA18; AA18 = AA38; AA38 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA10; AA10 = AA40; AA40 = tmp;
tmp = AA11; AA11 = AA41; AA41 = tmp;
tmp = AA12; AA12 = AA42; AA42 = tmp;
tmp = AA13; AA13 = AA43; AA43 = tmp;
tmp = AA14; AA14 = AA44; AA44 = tmp;
tmp = AA15; AA15 = AA45; AA45 = tmp;
tmp = AA16; AA16 = AA46; AA46 = tmp;
tmp = AA17; AA17 = AA47; AA47 = tmp;
tmp = AA18; AA18 = AA48; AA48 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA10; AA10 = AA50; AA50 = tmp;
tmp = AA11; AA11 = AA51; AA51 = tmp;
tmp = AA12; AA12 = AA52; AA52 = tmp;
tmp = AA13; AA13 = AA53; AA53 = tmp;
tmp = AA14; AA14 = AA54; AA54 = tmp;
tmp = AA15; AA15 = AA55; AA55 = tmp;
tmp = AA16; AA16 = AA56; AA56 = tmp;
tmp = AA17; AA17 = AA57; AA57 = tmp;
tmp = AA18; AA18 = AA58; AA58 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA10; AA10 = AA60; AA60 = tmp;
tmp = AA11; AA11 = AA61; AA61 = tmp;
tmp = AA12; AA12 = AA62; AA62 = tmp;
tmp = AA13; AA13 = AA63; AA63 = tmp;
tmp = AA14; AA14 = AA64; AA64 = tmp;
tmp = AA15; AA15 = AA65; AA65 = tmp;
tmp = AA16; AA16 = AA66; AA66 = tmp;
tmp = AA17; AA17 = AA67; AA67 = tmp;
tmp = AA18; AA18 = AA68; AA68 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA10; AA10 = AA70; AA70 = tmp;
tmp = AA11; AA11 = AA71; AA71 = tmp;
tmp = AA12; AA12 = AA72; AA72 = tmp;
tmp = AA13; AA13 = AA73; AA73 = tmp;
tmp = AA14; AA14 = AA74; AA74 = tmp;
tmp = AA15; AA15 = AA75; AA75 = tmp;
tmp = AA16; AA16 = AA76; AA76 = tmp;
tmp = AA17; AA17 = AA77; AA77 = tmp;
tmp = AA18; AA18 = AA78; AA78 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA10; AA10 = AA80; AA80 = tmp;
tmp = AA11; AA11 = AA81; AA81 = tmp;
tmp = AA12; AA12 = AA82; AA82 = tmp;
tmp = AA13; AA13 = AA83; AA83 = tmp;
tmp = AA14; AA14 = AA84; AA84 = tmp;
tmp = AA15; AA15 = AA85; AA85 = tmp;
tmp = AA16; AA16 = AA86; AA86 = tmp;
tmp = AA17; AA17 = AA87; AA87 = tmp;
tmp = AA18; AA18 = AA88; AA88 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm8; perm8 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA11);
icol1 = perm1;
AA10 = mulOp (tmp, AA10);
AA11 = tmp;
AA12 = mulOp (tmp, AA12);
AA13 = mulOp (tmp, AA13);
AA14 = mulOp (tmp, AA14);
AA15 = mulOp (tmp, AA15);
AA16 = mulOp (tmp, AA16);
AA17 = mulOp (tmp, AA17);
AA18 = mulOp (tmp, AA18);
/* eliminate above and below current row */
tmp = AA01;
AA00 = fmnaOp (tmp, AA10, AA00);
AA01 = mulOp (negOp(tmp), AA11);
AA02 = fmnaOp (tmp, AA12, AA02);
AA03 = fmnaOp (tmp, AA13, AA03);
AA04 = fmnaOp (tmp, AA14, AA04);
AA05 = fmnaOp (tmp, AA15, AA05);
AA06 = fmnaOp (tmp, AA16, AA06);
AA07 = fmnaOp (tmp, AA17, AA07);
AA08 = fmnaOp (tmp, AA18, AA08);
tmp = AA21;
AA20 = fmnaOp (tmp, AA10, AA20);
AA21 = mulOp (negOp(tmp), AA11);
AA22 = fmnaOp (tmp, AA12, AA22);
AA23 = fmnaOp (tmp, AA13, AA23);
AA24 = fmnaOp (tmp, AA14, AA24);
AA25 = fmnaOp (tmp, AA15, AA25);
AA26 = fmnaOp (tmp, AA16, AA26);
AA27 = fmnaOp (tmp, AA17, AA27);
AA28 = fmnaOp (tmp, AA18, AA28);
tmp = AA31;
AA30 = fmnaOp (tmp, AA10, AA30);
AA31 = mulOp (negOp(tmp), AA11);
AA32 = fmnaOp (tmp, AA12, AA32);
AA33 = fmnaOp (tmp, AA13, AA33);
AA34 = fmnaOp (tmp, AA14, AA34);
AA35 = fmnaOp (tmp, AA15, AA35);
AA36 = fmnaOp (tmp, AA16, AA36);
AA37 = fmnaOp (tmp, AA17, AA37);
AA38 = fmnaOp (tmp, AA18, AA38);
tmp = AA41;
AA40 = fmnaOp (tmp, AA10, AA40);
AA41 = mulOp (negOp(tmp), AA11);
AA42 = fmnaOp (tmp, AA12, AA42);
AA43 = fmnaOp (tmp, AA13, AA43);
AA44 = fmnaOp (tmp, AA14, AA44);
AA45 = fmnaOp (tmp, AA15, AA45);
AA46 = fmnaOp (tmp, AA16, AA46);
AA47 = fmnaOp (tmp, AA17, AA47);
AA48 = fmnaOp (tmp, AA18, AA48);
tmp = AA51;
AA50 = fmnaOp (tmp, AA10, AA50);
AA51 = mulOp (negOp(tmp), AA11);
AA52 = fmnaOp (tmp, AA12, AA52);
AA53 = fmnaOp (tmp, AA13, AA53);
AA54 = fmnaOp (tmp, AA14, AA54);
AA55 = fmnaOp (tmp, AA15, AA55);
AA56 = fmnaOp (tmp, AA16, AA56);
AA57 = fmnaOp (tmp, AA17, AA57);
AA58 = fmnaOp (tmp, AA18, AA58);
tmp = AA61;
AA60 = fmnaOp (tmp, AA10, AA60);
AA61 = mulOp (negOp(tmp), AA11);
AA62 = fmnaOp (tmp, AA12, AA62);
AA63 = fmnaOp (tmp, AA13, AA63);
AA64 = fmnaOp (tmp, AA14, AA64);
AA65 = fmnaOp (tmp, AA15, AA65);
AA66 = fmnaOp (tmp, AA16, AA66);
AA67 = fmnaOp (tmp, AA17, AA67);
AA68 = fmnaOp (tmp, AA18, AA68);
tmp = AA71;
AA70 = fmnaOp (tmp, AA10, AA70);
AA71 = mulOp (negOp(tmp), AA11);
AA72 = fmnaOp (tmp, AA12, AA72);
AA73 = fmnaOp (tmp, AA13, AA73);
AA74 = fmnaOp (tmp, AA14, AA74);
AA75 = fmnaOp (tmp, AA15, AA75);
AA76 = fmnaOp (tmp, AA16, AA76);
AA77 = fmnaOp (tmp, AA17, AA77);
AA78 = fmnaOp (tmp, AA18, AA78);
tmp = AA81;
AA80 = fmnaOp (tmp, AA10, AA80);
AA81 = mulOp (negOp(tmp), AA11);
AA82 = fmnaOp (tmp, AA12, AA82);
AA83 = fmnaOp (tmp, AA13, AA83);
AA84 = fmnaOp (tmp, AA14, AA84);
AA85 = fmnaOp (tmp, AA15, AA85);
AA86 = fmnaOp (tmp, AA16, AA86);
AA87 = fmnaOp (tmp, AA17, AA87);
AA88 = fmnaOp (tmp, AA18, AA88);
/****************** iteration 2 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA22);
pvt = 2;
t = absOp (AA32);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA42);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA52);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA62);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA72);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA82);
if (t > p) { p = t; pvt = 8; }
/* swap pivot row with row 2 */
if (pvt == 3) {
tmp = AA20; AA20 = AA30; AA30 = tmp;
tmp = AA21; AA21 = AA31; AA31 = tmp;
tmp = AA22; AA22 = AA32; AA32 = tmp;
tmp = AA23; AA23 = AA33; AA33 = tmp;
tmp = AA24; AA24 = AA34; AA34 = tmp;
tmp = AA25; AA25 = AA35; AA35 = tmp;
tmp = AA26; AA26 = AA36; AA36 = tmp;
tmp = AA27; AA27 = AA37; AA37 = tmp;
tmp = AA28; AA28 = AA38; AA38 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA20; AA20 = AA40; AA40 = tmp;
tmp = AA21; AA21 = AA41; AA41 = tmp;
tmp = AA22; AA22 = AA42; AA42 = tmp;
tmp = AA23; AA23 = AA43; AA43 = tmp;
tmp = AA24; AA24 = AA44; AA44 = tmp;
tmp = AA25; AA25 = AA45; AA45 = tmp;
tmp = AA26; AA26 = AA46; AA46 = tmp;
tmp = AA27; AA27 = AA47; AA47 = tmp;
tmp = AA28; AA28 = AA48; AA48 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA20; AA20 = AA50; AA50 = tmp;
tmp = AA21; AA21 = AA51; AA51 = tmp;
tmp = AA22; AA22 = AA52; AA52 = tmp;
tmp = AA23; AA23 = AA53; AA53 = tmp;
tmp = AA24; AA24 = AA54; AA54 = tmp;
tmp = AA25; AA25 = AA55; AA55 = tmp;
tmp = AA26; AA26 = AA56; AA56 = tmp;
tmp = AA27; AA27 = AA57; AA57 = tmp;
tmp = AA28; AA28 = AA58; AA58 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA20; AA20 = AA60; AA60 = tmp;
tmp = AA21; AA21 = AA61; AA61 = tmp;
tmp = AA22; AA22 = AA62; AA62 = tmp;
tmp = AA23; AA23 = AA63; AA63 = tmp;
tmp = AA24; AA24 = AA64; AA64 = tmp;
tmp = AA25; AA25 = AA65; AA65 = tmp;
tmp = AA26; AA26 = AA66; AA66 = tmp;
tmp = AA27; AA27 = AA67; AA67 = tmp;
tmp = AA28; AA28 = AA68; AA68 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA20; AA20 = AA70; AA70 = tmp;
tmp = AA21; AA21 = AA71; AA71 = tmp;
tmp = AA22; AA22 = AA72; AA72 = tmp;
tmp = AA23; AA23 = AA73; AA73 = tmp;
tmp = AA24; AA24 = AA74; AA74 = tmp;
tmp = AA25; AA25 = AA75; AA75 = tmp;
tmp = AA26; AA26 = AA76; AA76 = tmp;
tmp = AA27; AA27 = AA77; AA77 = tmp;
tmp = AA28; AA28 = AA78; AA78 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA20; AA20 = AA80; AA80 = tmp;
tmp = AA21; AA21 = AA81; AA81 = tmp;
tmp = AA22; AA22 = AA82; AA82 = tmp;
tmp = AA23; AA23 = AA83; AA83 = tmp;
tmp = AA24; AA24 = AA84; AA84 = tmp;
tmp = AA25; AA25 = AA85; AA85 = tmp;
tmp = AA26; AA26 = AA86; AA86 = tmp;
tmp = AA27; AA27 = AA87; AA87 = tmp;
tmp = AA28; AA28 = AA88; AA88 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm8; perm8 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA22);
icol2 = perm2;
AA20 = mulOp (tmp, AA20);
AA21 = mulOp (tmp, AA21);
AA22 = tmp;
AA23 = mulOp (tmp, AA23);
AA24 = mulOp (tmp, AA24);
AA25 = mulOp (tmp, AA25);
AA26 = mulOp (tmp, AA26);
AA27 = mulOp (tmp, AA27);
AA28 = mulOp (tmp, AA28);
/* eliminate above and below current row */
tmp = AA02;
AA00 = fmnaOp (tmp, AA20, AA00);
AA01 = fmnaOp (tmp, AA21, AA01);
AA02 = mulOp (negOp(tmp), AA22);
AA03 = fmnaOp (tmp, AA23, AA03);
AA04 = fmnaOp (tmp, AA24, AA04);
AA05 = fmnaOp (tmp, AA25, AA05);
AA06 = fmnaOp (tmp, AA26, AA06);
AA07 = fmnaOp (tmp, AA27, AA07);
AA08 = fmnaOp (tmp, AA28, AA08);
tmp = AA12;
AA10 = fmnaOp (tmp, AA20, AA10);
AA11 = fmnaOp (tmp, AA21, AA11);
AA12 = mulOp (negOp(tmp), AA22);
AA13 = fmnaOp (tmp, AA23, AA13);
AA14 = fmnaOp (tmp, AA24, AA14);
AA15 = fmnaOp (tmp, AA25, AA15);
AA16 = fmnaOp (tmp, AA26, AA16);
AA17 = fmnaOp (tmp, AA27, AA17);
AA18 = fmnaOp (tmp, AA28, AA18);
tmp = AA32;
AA30 = fmnaOp (tmp, AA20, AA30);
AA31 = fmnaOp (tmp, AA21, AA31);
AA32 = mulOp (negOp(tmp), AA22);
AA33 = fmnaOp (tmp, AA23, AA33);
AA34 = fmnaOp (tmp, AA24, AA34);
AA35 = fmnaOp (tmp, AA25, AA35);
AA36 = fmnaOp (tmp, AA26, AA36);
AA37 = fmnaOp (tmp, AA27, AA37);
AA38 = fmnaOp (tmp, AA28, AA38);
tmp = AA42;
AA40 = fmnaOp (tmp, AA20, AA40);
AA41 = fmnaOp (tmp, AA21, AA41);
AA42 = mulOp (negOp(tmp), AA22);
AA43 = fmnaOp (tmp, AA23, AA43);
AA44 = fmnaOp (tmp, AA24, AA44);
AA45 = fmnaOp (tmp, AA25, AA45);
AA46 = fmnaOp (tmp, AA26, AA46);
AA47 = fmnaOp (tmp, AA27, AA47);
AA48 = fmnaOp (tmp, AA28, AA48);
tmp = AA52;
AA50 = fmnaOp (tmp, AA20, AA50);
AA51 = fmnaOp (tmp, AA21, AA51);
AA52 = mulOp (negOp(tmp), AA22);
AA53 = fmnaOp (tmp, AA23, AA53);
AA54 = fmnaOp (tmp, AA24, AA54);
AA55 = fmnaOp (tmp, AA25, AA55);
AA56 = fmnaOp (tmp, AA26, AA56);
AA57 = fmnaOp (tmp, AA27, AA57);
AA58 = fmnaOp (tmp, AA28, AA58);
tmp = AA62;
AA60 = fmnaOp (tmp, AA20, AA60);
AA61 = fmnaOp (tmp, AA21, AA61);
AA62 = mulOp (negOp(tmp), AA22);
AA63 = fmnaOp (tmp, AA23, AA63);
AA64 = fmnaOp (tmp, AA24, AA64);
AA65 = fmnaOp (tmp, AA25, AA65);
AA66 = fmnaOp (tmp, AA26, AA66);
AA67 = fmnaOp (tmp, AA27, AA67);
AA68 = fmnaOp (tmp, AA28, AA68);
tmp = AA72;
AA70 = fmnaOp (tmp, AA20, AA70);
AA71 = fmnaOp (tmp, AA21, AA71);
AA72 = mulOp (negOp(tmp), AA22);
AA73 = fmnaOp (tmp, AA23, AA73);
AA74 = fmnaOp (tmp, AA24, AA74);
AA75 = fmnaOp (tmp, AA25, AA75);
AA76 = fmnaOp (tmp, AA26, AA76);
AA77 = fmnaOp (tmp, AA27, AA77);
AA78 = fmnaOp (tmp, AA28, AA78);
tmp = AA82;
AA80 = fmnaOp (tmp, AA20, AA80);
AA81 = fmnaOp (tmp, AA21, AA81);
AA82 = mulOp (negOp(tmp), AA22);
AA83 = fmnaOp (tmp, AA23, AA83);
AA84 = fmnaOp (tmp, AA24, AA84);
AA85 = fmnaOp (tmp, AA25, AA85);
AA86 = fmnaOp (tmp, AA26, AA86);
AA87 = fmnaOp (tmp, AA27, AA87);
AA88 = fmnaOp (tmp, AA28, AA88);
/****************** iteration 3 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA33);
pvt = 3;
t = absOp (AA43);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA53);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA63);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA73);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA83);
if (t > p) { p = t; pvt = 8; }
/* swap pivot row with row 3 */
if (pvt == 4) {
tmp = AA30; AA30 = AA40; AA40 = tmp;
tmp = AA31; AA31 = AA41; AA41 = tmp;
tmp = AA32; AA32 = AA42; AA42 = tmp;
tmp = AA33; AA33 = AA43; AA43 = tmp;
tmp = AA34; AA34 = AA44; AA44 = tmp;
tmp = AA35; AA35 = AA45; AA45 = tmp;
tmp = AA36; AA36 = AA46; AA46 = tmp;
tmp = AA37; AA37 = AA47; AA47 = tmp;
tmp = AA38; AA38 = AA48; AA48 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA30; AA30 = AA50; AA50 = tmp;
tmp = AA31; AA31 = AA51; AA51 = tmp;
tmp = AA32; AA32 = AA52; AA52 = tmp;
tmp = AA33; AA33 = AA53; AA53 = tmp;
tmp = AA34; AA34 = AA54; AA54 = tmp;
tmp = AA35; AA35 = AA55; AA55 = tmp;
tmp = AA36; AA36 = AA56; AA56 = tmp;
tmp = AA37; AA37 = AA57; AA57 = tmp;
tmp = AA38; AA38 = AA58; AA58 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA30; AA30 = AA60; AA60 = tmp;
tmp = AA31; AA31 = AA61; AA61 = tmp;
tmp = AA32; AA32 = AA62; AA62 = tmp;
tmp = AA33; AA33 = AA63; AA63 = tmp;
tmp = AA34; AA34 = AA64; AA64 = tmp;
tmp = AA35; AA35 = AA65; AA65 = tmp;
tmp = AA36; AA36 = AA66; AA66 = tmp;
tmp = AA37; AA37 = AA67; AA67 = tmp;
tmp = AA38; AA38 = AA68; AA68 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA30; AA30 = AA70; AA70 = tmp;
tmp = AA31; AA31 = AA71; AA71 = tmp;
tmp = AA32; AA32 = AA72; AA72 = tmp;
tmp = AA33; AA33 = AA73; AA73 = tmp;
tmp = AA34; AA34 = AA74; AA74 = tmp;
tmp = AA35; AA35 = AA75; AA75 = tmp;
tmp = AA36; AA36 = AA76; AA76 = tmp;
tmp = AA37; AA37 = AA77; AA77 = tmp;
tmp = AA38; AA38 = AA78; AA78 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA30; AA30 = AA80; AA80 = tmp;
tmp = AA31; AA31 = AA81; AA81 = tmp;
tmp = AA32; AA32 = AA82; AA82 = tmp;
tmp = AA33; AA33 = AA83; AA83 = tmp;
tmp = AA34; AA34 = AA84; AA84 = tmp;
tmp = AA35; AA35 = AA85; AA85 = tmp;
tmp = AA36; AA36 = AA86; AA86 = tmp;
tmp = AA37; AA37 = AA87; AA87 = tmp;
tmp = AA38; AA38 = AA88; AA88 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm8; perm8 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA33);
icol3 = perm3;
AA30 = mulOp (tmp, AA30);
AA31 = mulOp (tmp, AA31);
AA32 = mulOp (tmp, AA32);
AA33 = tmp;
AA34 = mulOp (tmp, AA34);
AA35 = mulOp (tmp, AA35);
AA36 = mulOp (tmp, AA36);
AA37 = mulOp (tmp, AA37);
AA38 = mulOp (tmp, AA38);
/* eliminate above and below current row */
tmp = AA03;
AA00 = fmnaOp (tmp, AA30, AA00);
AA01 = fmnaOp (tmp, AA31, AA01);
AA02 = fmnaOp (tmp, AA32, AA02);
AA03 = mulOp (negOp(tmp), AA33);
AA04 = fmnaOp (tmp, AA34, AA04);
AA05 = fmnaOp (tmp, AA35, AA05);
AA06 = fmnaOp (tmp, AA36, AA06);
AA07 = fmnaOp (tmp, AA37, AA07);
AA08 = fmnaOp (tmp, AA38, AA08);
tmp = AA13;
AA10 = fmnaOp (tmp, AA30, AA10);
AA11 = fmnaOp (tmp, AA31, AA11);
AA12 = fmnaOp (tmp, AA32, AA12);
AA13 = mulOp (negOp(tmp), AA33);
AA14 = fmnaOp (tmp, AA34, AA14);
AA15 = fmnaOp (tmp, AA35, AA15);
AA16 = fmnaOp (tmp, AA36, AA16);
AA17 = fmnaOp (tmp, AA37, AA17);
AA18 = fmnaOp (tmp, AA38, AA18);
tmp = AA23;
AA20 = fmnaOp (tmp, AA30, AA20);
AA21 = fmnaOp (tmp, AA31, AA21);
AA22 = fmnaOp (tmp, AA32, AA22);
AA23 = mulOp (negOp(tmp), AA33);
AA24 = fmnaOp (tmp, AA34, AA24);
AA25 = fmnaOp (tmp, AA35, AA25);
AA26 = fmnaOp (tmp, AA36, AA26);
AA27 = fmnaOp (tmp, AA37, AA27);
AA28 = fmnaOp (tmp, AA38, AA28);
tmp = AA43;
AA40 = fmnaOp (tmp, AA30, AA40);
AA41 = fmnaOp (tmp, AA31, AA41);
AA42 = fmnaOp (tmp, AA32, AA42);
AA43 = mulOp (negOp(tmp), AA33);
AA44 = fmnaOp (tmp, AA34, AA44);
AA45 = fmnaOp (tmp, AA35, AA45);
AA46 = fmnaOp (tmp, AA36, AA46);
AA47 = fmnaOp (tmp, AA37, AA47);
AA48 = fmnaOp (tmp, AA38, AA48);
tmp = AA53;
AA50 = fmnaOp (tmp, AA30, AA50);
AA51 = fmnaOp (tmp, AA31, AA51);
AA52 = fmnaOp (tmp, AA32, AA52);
AA53 = mulOp (negOp(tmp), AA33);
AA54 = fmnaOp (tmp, AA34, AA54);
AA55 = fmnaOp (tmp, AA35, AA55);
AA56 = fmnaOp (tmp, AA36, AA56);
AA57 = fmnaOp (tmp, AA37, AA57);
AA58 = fmnaOp (tmp, AA38, AA58);
tmp = AA63;
AA60 = fmnaOp (tmp, AA30, AA60);
AA61 = fmnaOp (tmp, AA31, AA61);
AA62 = fmnaOp (tmp, AA32, AA62);
AA63 = mulOp (negOp(tmp), AA33);
AA64 = fmnaOp (tmp, AA34, AA64);
AA65 = fmnaOp (tmp, AA35, AA65);
AA66 = fmnaOp (tmp, AA36, AA66);
AA67 = fmnaOp (tmp, AA37, AA67);
AA68 = fmnaOp (tmp, AA38, AA68);
tmp = AA73;
AA70 = fmnaOp (tmp, AA30, AA70);
AA71 = fmnaOp (tmp, AA31, AA71);
AA72 = fmnaOp (tmp, AA32, AA72);
AA73 = mulOp (negOp(tmp), AA33);
AA74 = fmnaOp (tmp, AA34, AA74);
AA75 = fmnaOp (tmp, AA35, AA75);
AA76 = fmnaOp (tmp, AA36, AA76);
AA77 = fmnaOp (tmp, AA37, AA77);
AA78 = fmnaOp (tmp, AA38, AA78);
tmp = AA83;
AA80 = fmnaOp (tmp, AA30, AA80);
AA81 = fmnaOp (tmp, AA31, AA81);
AA82 = fmnaOp (tmp, AA32, AA82);
AA83 = mulOp (negOp(tmp), AA33);
AA84 = fmnaOp (tmp, AA34, AA84);
AA85 = fmnaOp (tmp, AA35, AA85);
AA86 = fmnaOp (tmp, AA36, AA86);
AA87 = fmnaOp (tmp, AA37, AA87);
AA88 = fmnaOp (tmp, AA38, AA88);
/****************** iteration 4 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA44);
pvt = 4;
t = absOp (AA54);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA64);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA74);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA84);
if (t > p) { p = t; pvt = 8; }
/* swap pivot row with row 4 */
if (pvt == 5) {
tmp = AA40; AA40 = AA50; AA50 = tmp;
tmp = AA41; AA41 = AA51; AA51 = tmp;
tmp = AA42; AA42 = AA52; AA52 = tmp;
tmp = AA43; AA43 = AA53; AA53 = tmp;
tmp = AA44; AA44 = AA54; AA54 = tmp;
tmp = AA45; AA45 = AA55; AA55 = tmp;
tmp = AA46; AA46 = AA56; AA56 = tmp;
tmp = AA47; AA47 = AA57; AA57 = tmp;
tmp = AA48; AA48 = AA58; AA58 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA40; AA40 = AA60; AA60 = tmp;
tmp = AA41; AA41 = AA61; AA61 = tmp;
tmp = AA42; AA42 = AA62; AA62 = tmp;
tmp = AA43; AA43 = AA63; AA63 = tmp;
tmp = AA44; AA44 = AA64; AA64 = tmp;
tmp = AA45; AA45 = AA65; AA65 = tmp;
tmp = AA46; AA46 = AA66; AA66 = tmp;
tmp = AA47; AA47 = AA67; AA67 = tmp;
tmp = AA48; AA48 = AA68; AA68 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA40; AA40 = AA70; AA70 = tmp;
tmp = AA41; AA41 = AA71; AA71 = tmp;
tmp = AA42; AA42 = AA72; AA72 = tmp;
tmp = AA43; AA43 = AA73; AA73 = tmp;
tmp = AA44; AA44 = AA74; AA74 = tmp;
tmp = AA45; AA45 = AA75; AA75 = tmp;
tmp = AA46; AA46 = AA76; AA76 = tmp;
tmp = AA47; AA47 = AA77; AA77 = tmp;
tmp = AA48; AA48 = AA78; AA78 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA40; AA40 = AA80; AA80 = tmp;
tmp = AA41; AA41 = AA81; AA81 = tmp;
tmp = AA42; AA42 = AA82; AA82 = tmp;
tmp = AA43; AA43 = AA83; AA83 = tmp;
tmp = AA44; AA44 = AA84; AA84 = tmp;
tmp = AA45; AA45 = AA85; AA85 = tmp;
tmp = AA46; AA46 = AA86; AA86 = tmp;
tmp = AA47; AA47 = AA87; AA87 = tmp;
tmp = AA48; AA48 = AA88; AA88 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm8; perm8 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA44);
icol4 = perm4;
AA40 = mulOp (tmp, AA40);
AA41 = mulOp (tmp, AA41);
AA42 = mulOp (tmp, AA42);
AA43 = mulOp (tmp, AA43);
AA44 = tmp;
AA45 = mulOp (tmp, AA45);
AA46 = mulOp (tmp, AA46);
AA47 = mulOp (tmp, AA47);
AA48 = mulOp (tmp, AA48);
/* eliminate above and below current row */
tmp = AA04;
AA00 = fmnaOp (tmp, AA40, AA00);
AA01 = fmnaOp (tmp, AA41, AA01);
AA02 = fmnaOp (tmp, AA42, AA02);
AA03 = fmnaOp (tmp, AA43, AA03);
AA04 = mulOp (negOp(tmp), AA44);
AA05 = fmnaOp (tmp, AA45, AA05);
AA06 = fmnaOp (tmp, AA46, AA06);
AA07 = fmnaOp (tmp, AA47, AA07);
AA08 = fmnaOp (tmp, AA48, AA08);
tmp = AA14;
AA10 = fmnaOp (tmp, AA40, AA10);
AA11 = fmnaOp (tmp, AA41, AA11);
AA12 = fmnaOp (tmp, AA42, AA12);
AA13 = fmnaOp (tmp, AA43, AA13);
AA14 = mulOp (negOp(tmp), AA44);
AA15 = fmnaOp (tmp, AA45, AA15);
AA16 = fmnaOp (tmp, AA46, AA16);
AA17 = fmnaOp (tmp, AA47, AA17);
AA18 = fmnaOp (tmp, AA48, AA18);
tmp = AA24;
AA20 = fmnaOp (tmp, AA40, AA20);
AA21 = fmnaOp (tmp, AA41, AA21);
AA22 = fmnaOp (tmp, AA42, AA22);
AA23 = fmnaOp (tmp, AA43, AA23);
AA24 = mulOp (negOp(tmp), AA44);
AA25 = fmnaOp (tmp, AA45, AA25);
AA26 = fmnaOp (tmp, AA46, AA26);
AA27 = fmnaOp (tmp, AA47, AA27);
AA28 = fmnaOp (tmp, AA48, AA28);
tmp = AA34;
AA30 = fmnaOp (tmp, AA40, AA30);
AA31 = fmnaOp (tmp, AA41, AA31);
AA32 = fmnaOp (tmp, AA42, AA32);
AA33 = fmnaOp (tmp, AA43, AA33);
AA34 = mulOp (negOp(tmp), AA44);
AA35 = fmnaOp (tmp, AA45, AA35);
AA36 = fmnaOp (tmp, AA46, AA36);
AA37 = fmnaOp (tmp, AA47, AA37);
AA38 = fmnaOp (tmp, AA48, AA38);
tmp = AA54;
AA50 = fmnaOp (tmp, AA40, AA50);
AA51 = fmnaOp (tmp, AA41, AA51);
AA52 = fmnaOp (tmp, AA42, AA52);
AA53 = fmnaOp (tmp, AA43, AA53);
AA54 = mulOp (negOp(tmp), AA44);
AA55 = fmnaOp (tmp, AA45, AA55);
AA56 = fmnaOp (tmp, AA46, AA56);
AA57 = fmnaOp (tmp, AA47, AA57);
AA58 = fmnaOp (tmp, AA48, AA58);
tmp = AA64;
AA60 = fmnaOp (tmp, AA40, AA60);
AA61 = fmnaOp (tmp, AA41, AA61);
AA62 = fmnaOp (tmp, AA42, AA62);
AA63 = fmnaOp (tmp, AA43, AA63);
AA64 = mulOp (negOp(tmp), AA44);
AA65 = fmnaOp (tmp, AA45, AA65);
AA66 = fmnaOp (tmp, AA46, AA66);
AA67 = fmnaOp (tmp, AA47, AA67);
AA68 = fmnaOp (tmp, AA48, AA68);
tmp = AA74;
AA70 = fmnaOp (tmp, AA40, AA70);
AA71 = fmnaOp (tmp, AA41, AA71);
AA72 = fmnaOp (tmp, AA42, AA72);
AA73 = fmnaOp (tmp, AA43, AA73);
AA74 = mulOp (negOp(tmp), AA44);
AA75 = fmnaOp (tmp, AA45, AA75);
AA76 = fmnaOp (tmp, AA46, AA76);
AA77 = fmnaOp (tmp, AA47, AA77);
AA78 = fmnaOp (tmp, AA48, AA78);
tmp = AA84;
AA80 = fmnaOp (tmp, AA40, AA80);
AA81 = fmnaOp (tmp, AA41, AA81);
AA82 = fmnaOp (tmp, AA42, AA82);
AA83 = fmnaOp (tmp, AA43, AA83);
AA84 = mulOp (negOp(tmp), AA44);
AA85 = fmnaOp (tmp, AA45, AA85);
AA86 = fmnaOp (tmp, AA46, AA86);
AA87 = fmnaOp (tmp, AA47, AA87);
AA88 = fmnaOp (tmp, AA48, AA88);
/****************** iteration 5 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA55);
pvt = 5;
t = absOp (AA65);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA75);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA85);
if (t > p) { p = t; pvt = 8; }
/* swap pivot row with row 5 */
if (pvt == 6) {
tmp = AA50; AA50 = AA60; AA60 = tmp;
tmp = AA51; AA51 = AA61; AA61 = tmp;
tmp = AA52; AA52 = AA62; AA62 = tmp;
tmp = AA53; AA53 = AA63; AA63 = tmp;
tmp = AA54; AA54 = AA64; AA64 = tmp;
tmp = AA55; AA55 = AA65; AA65 = tmp;
tmp = AA56; AA56 = AA66; AA66 = tmp;
tmp = AA57; AA57 = AA67; AA67 = tmp;
tmp = AA58; AA58 = AA68; AA68 = tmp;
/* update permutation vector based on row swap */
i = perm5; perm5 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA50; AA50 = AA70; AA70 = tmp;
tmp = AA51; AA51 = AA71; AA71 = tmp;
tmp = AA52; AA52 = AA72; AA72 = tmp;
tmp = AA53; AA53 = AA73; AA73 = tmp;
tmp = AA54; AA54 = AA74; AA74 = tmp;
tmp = AA55; AA55 = AA75; AA75 = tmp;
tmp = AA56; AA56 = AA76; AA76 = tmp;
tmp = AA57; AA57 = AA77; AA77 = tmp;
tmp = AA58; AA58 = AA78; AA78 = tmp;
/* update permutation vector based on row swap */
i = perm5; perm5 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA50; AA50 = AA80; AA80 = tmp;
tmp = AA51; AA51 = AA81; AA81 = tmp;
tmp = AA52; AA52 = AA82; AA82 = tmp;
tmp = AA53; AA53 = AA83; AA83 = tmp;
tmp = AA54; AA54 = AA84; AA84 = tmp;
tmp = AA55; AA55 = AA85; AA85 = tmp;
tmp = AA56; AA56 = AA86; AA86 = tmp;
tmp = AA57; AA57 = AA87; AA87 = tmp;
tmp = AA58; AA58 = AA88; AA88 = tmp;
/* update permutation vector based on row swap */
i = perm5; perm5 = perm8; perm8 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA55);
icol5 = perm5;
AA50 = mulOp (tmp, AA50);
AA51 = mulOp (tmp, AA51);
AA52 = mulOp (tmp, AA52);
AA53 = mulOp (tmp, AA53);
AA54 = mulOp (tmp, AA54);
AA55 = tmp;
AA56 = mulOp (tmp, AA56);
AA57 = mulOp (tmp, AA57);
AA58 = mulOp (tmp, AA58);
/* eliminate above and below current row */
tmp = AA05;
AA00 = fmnaOp (tmp, AA50, AA00);
AA01 = fmnaOp (tmp, AA51, AA01);
AA02 = fmnaOp (tmp, AA52, AA02);
AA03 = fmnaOp (tmp, AA53, AA03);
AA04 = fmnaOp (tmp, AA54, AA04);
AA05 = mulOp (negOp(tmp), AA55);
AA06 = fmnaOp (tmp, AA56, AA06);
AA07 = fmnaOp (tmp, AA57, AA07);
AA08 = fmnaOp (tmp, AA58, AA08);
tmp = AA15;
AA10 = fmnaOp (tmp, AA50, AA10);
AA11 = fmnaOp (tmp, AA51, AA11);
AA12 = fmnaOp (tmp, AA52, AA12);
AA13 = fmnaOp (tmp, AA53, AA13);
AA14 = fmnaOp (tmp, AA54, AA14);
AA15 = mulOp (negOp(tmp), AA55);
AA16 = fmnaOp (tmp, AA56, AA16);
AA17 = fmnaOp (tmp, AA57, AA17);
AA18 = fmnaOp (tmp, AA58, AA18);
tmp = AA25;
AA20 = fmnaOp (tmp, AA50, AA20);
AA21 = fmnaOp (tmp, AA51, AA21);
AA22 = fmnaOp (tmp, AA52, AA22);
AA23 = fmnaOp (tmp, AA53, AA23);
AA24 = fmnaOp (tmp, AA54, AA24);
AA25 = mulOp (negOp(tmp), AA55);
AA26 = fmnaOp (tmp, AA56, AA26);
AA27 = fmnaOp (tmp, AA57, AA27);
AA28 = fmnaOp (tmp, AA58, AA28);
tmp = AA35;
AA30 = fmnaOp (tmp, AA50, AA30);
AA31 = fmnaOp (tmp, AA51, AA31);
AA32 = fmnaOp (tmp, AA52, AA32);
AA33 = fmnaOp (tmp, AA53, AA33);
AA34 = fmnaOp (tmp, AA54, AA34);
AA35 = mulOp (negOp(tmp), AA55);
AA36 = fmnaOp (tmp, AA56, AA36);
AA37 = fmnaOp (tmp, AA57, AA37);
AA38 = fmnaOp (tmp, AA58, AA38);
tmp = AA45;
AA40 = fmnaOp (tmp, AA50, AA40);
AA41 = fmnaOp (tmp, AA51, AA41);
AA42 = fmnaOp (tmp, AA52, AA42);
AA43 = fmnaOp (tmp, AA53, AA43);
AA44 = fmnaOp (tmp, AA54, AA44);
AA45 = mulOp (negOp(tmp), AA55);
AA46 = fmnaOp (tmp, AA56, AA46);
AA47 = fmnaOp (tmp, AA57, AA47);
AA48 = fmnaOp (tmp, AA58, AA48);
tmp = AA65;
AA60 = fmnaOp (tmp, AA50, AA60);
AA61 = fmnaOp (tmp, AA51, AA61);
AA62 = fmnaOp (tmp, AA52, AA62);
AA63 = fmnaOp (tmp, AA53, AA63);
AA64 = fmnaOp (tmp, AA54, AA64);
AA65 = mulOp (negOp(tmp), AA55);
AA66 = fmnaOp (tmp, AA56, AA66);
AA67 = fmnaOp (tmp, AA57, AA67);
AA68 = fmnaOp (tmp, AA58, AA68);
tmp = AA75;
AA70 = fmnaOp (tmp, AA50, AA70);
AA71 = fmnaOp (tmp, AA51, AA71);
AA72 = fmnaOp (tmp, AA52, AA72);
AA73 = fmnaOp (tmp, AA53, AA73);
AA74 = fmnaOp (tmp, AA54, AA74);
AA75 = mulOp (negOp(tmp), AA55);
AA76 = fmnaOp (tmp, AA56, AA76);
AA77 = fmnaOp (tmp, AA57, AA77);
AA78 = fmnaOp (tmp, AA58, AA78);
tmp = AA85;
AA80 = fmnaOp (tmp, AA50, AA80);
AA81 = fmnaOp (tmp, AA51, AA81);
AA82 = fmnaOp (tmp, AA52, AA82);
AA83 = fmnaOp (tmp, AA53, AA83);
AA84 = fmnaOp (tmp, AA54, AA84);
AA85 = mulOp (negOp(tmp), AA55);
AA86 = fmnaOp (tmp, AA56, AA86);
AA87 = fmnaOp (tmp, AA57, AA87);
AA88 = fmnaOp (tmp, AA58, AA88);
/****************** iteration 6 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA66);
pvt = 6;
t = absOp (AA76);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA86);
if (t > p) { p = t; pvt = 8; }
/* swap pivot row with row 6 */
if (pvt == 7) {
tmp = AA60; AA60 = AA70; AA70 = tmp;
tmp = AA61; AA61 = AA71; AA71 = tmp;
tmp = AA62; AA62 = AA72; AA72 = tmp;
tmp = AA63; AA63 = AA73; AA73 = tmp;
tmp = AA64; AA64 = AA74; AA74 = tmp;
tmp = AA65; AA65 = AA75; AA75 = tmp;
tmp = AA66; AA66 = AA76; AA76 = tmp;
tmp = AA67; AA67 = AA77; AA77 = tmp;
tmp = AA68; AA68 = AA78; AA78 = tmp;
/* update permutation vector based on row swap */
i = perm6; perm6 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA60; AA60 = AA80; AA80 = tmp;
tmp = AA61; AA61 = AA81; AA81 = tmp;
tmp = AA62; AA62 = AA82; AA82 = tmp;
tmp = AA63; AA63 = AA83; AA83 = tmp;
tmp = AA64; AA64 = AA84; AA84 = tmp;
tmp = AA65; AA65 = AA85; AA85 = tmp;
tmp = AA66; AA66 = AA86; AA86 = tmp;
tmp = AA67; AA67 = AA87; AA87 = tmp;
tmp = AA68; AA68 = AA88; AA88 = tmp;
/* update permutation vector based on row swap */
i = perm6; perm6 = perm8; perm8 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA66);
icol6 = perm6;
AA60 = mulOp (tmp, AA60);
AA61 = mulOp (tmp, AA61);
AA62 = mulOp (tmp, AA62);
AA63 = mulOp (tmp, AA63);
AA64 = mulOp (tmp, AA64);
AA65 = mulOp (tmp, AA65);
AA66 = tmp;
AA67 = mulOp (tmp, AA67);
AA68 = mulOp (tmp, AA68);
/* eliminate above and below current row */
tmp = AA06;
AA00 = fmnaOp (tmp, AA60, AA00);
AA01 = fmnaOp (tmp, AA61, AA01);
AA02 = fmnaOp (tmp, AA62, AA02);
AA03 = fmnaOp (tmp, AA63, AA03);
AA04 = fmnaOp (tmp, AA64, AA04);
AA05 = fmnaOp (tmp, AA65, AA05);
AA06 = mulOp (negOp(tmp), AA66);
AA07 = fmnaOp (tmp, AA67, AA07);
AA08 = fmnaOp (tmp, AA68, AA08);
tmp = AA16;
AA10 = fmnaOp (tmp, AA60, AA10);
AA11 = fmnaOp (tmp, AA61, AA11);
AA12 = fmnaOp (tmp, AA62, AA12);
AA13 = fmnaOp (tmp, AA63, AA13);
AA14 = fmnaOp (tmp, AA64, AA14);
AA15 = fmnaOp (tmp, AA65, AA15);
AA16 = mulOp (negOp(tmp), AA66);
AA17 = fmnaOp (tmp, AA67, AA17);
AA18 = fmnaOp (tmp, AA68, AA18);
tmp = AA26;
AA20 = fmnaOp (tmp, AA60, AA20);
AA21 = fmnaOp (tmp, AA61, AA21);
AA22 = fmnaOp (tmp, AA62, AA22);
AA23 = fmnaOp (tmp, AA63, AA23);
AA24 = fmnaOp (tmp, AA64, AA24);
AA25 = fmnaOp (tmp, AA65, AA25);
AA26 = mulOp (negOp(tmp), AA66);
AA27 = fmnaOp (tmp, AA67, AA27);
AA28 = fmnaOp (tmp, AA68, AA28);
tmp = AA36;
AA30 = fmnaOp (tmp, AA60, AA30);
AA31 = fmnaOp (tmp, AA61, AA31);
AA32 = fmnaOp (tmp, AA62, AA32);
AA33 = fmnaOp (tmp, AA63, AA33);
AA34 = fmnaOp (tmp, AA64, AA34);
AA35 = fmnaOp (tmp, AA65, AA35);
AA36 = mulOp (negOp(tmp), AA66);
AA37 = fmnaOp (tmp, AA67, AA37);
AA38 = fmnaOp (tmp, AA68, AA38);
tmp = AA46;
AA40 = fmnaOp (tmp, AA60, AA40);
AA41 = fmnaOp (tmp, AA61, AA41);
AA42 = fmnaOp (tmp, AA62, AA42);
AA43 = fmnaOp (tmp, AA63, AA43);
AA44 = fmnaOp (tmp, AA64, AA44);
AA45 = fmnaOp (tmp, AA65, AA45);
AA46 = mulOp (negOp(tmp), AA66);
AA47 = fmnaOp (tmp, AA67, AA47);
AA48 = fmnaOp (tmp, AA68, AA48);
tmp = AA56;
AA50 = fmnaOp (tmp, AA60, AA50);
AA51 = fmnaOp (tmp, AA61, AA51);
AA52 = fmnaOp (tmp, AA62, AA52);
AA53 = fmnaOp (tmp, AA63, AA53);
AA54 = fmnaOp (tmp, AA64, AA54);
AA55 = fmnaOp (tmp, AA65, AA55);
AA56 = mulOp (negOp(tmp), AA66);
AA57 = fmnaOp (tmp, AA67, AA57);
AA58 = fmnaOp (tmp, AA68, AA58);
tmp = AA76;
AA70 = fmnaOp (tmp, AA60, AA70);
AA71 = fmnaOp (tmp, AA61, AA71);
AA72 = fmnaOp (tmp, AA62, AA72);
AA73 = fmnaOp (tmp, AA63, AA73);
AA74 = fmnaOp (tmp, AA64, AA74);
AA75 = fmnaOp (tmp, AA65, AA75);
AA76 = mulOp (negOp(tmp), AA66);
AA77 = fmnaOp (tmp, AA67, AA77);
AA78 = fmnaOp (tmp, AA68, AA78);
tmp = AA86;
AA80 = fmnaOp (tmp, AA60, AA80);
AA81 = fmnaOp (tmp, AA61, AA81);
AA82 = fmnaOp (tmp, AA62, AA82);
AA83 = fmnaOp (tmp, AA63, AA83);
AA84 = fmnaOp (tmp, AA64, AA84);
AA85 = fmnaOp (tmp, AA65, AA85);
AA86 = mulOp (negOp(tmp), AA66);
AA87 = fmnaOp (tmp, AA67, AA87);
AA88 = fmnaOp (tmp, AA68, AA88);
/****************** iteration 7 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA77);
pvt = 7;
t = absOp (AA87);
if (t > p) { p = t; pvt = 8; }
/* swap pivot row with row 7 */
if (pvt == 8) {
tmp = AA70; AA70 = AA80; AA80 = tmp;
tmp = AA71; AA71 = AA81; AA81 = tmp;
tmp = AA72; AA72 = AA82; AA82 = tmp;
tmp = AA73; AA73 = AA83; AA83 = tmp;
tmp = AA74; AA74 = AA84; AA84 = tmp;
tmp = AA75; AA75 = AA85; AA85 = tmp;
tmp = AA76; AA76 = AA86; AA86 = tmp;
tmp = AA77; AA77 = AA87; AA87 = tmp;
tmp = AA78; AA78 = AA88; AA88 = tmp;
/* update permutation vector based on row swap */
i = perm7; perm7 = perm8; perm8 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA77);
icol7 = perm7;
AA70 = mulOp (tmp, AA70);
AA71 = mulOp (tmp, AA71);
AA72 = mulOp (tmp, AA72);
AA73 = mulOp (tmp, AA73);
AA74 = mulOp (tmp, AA74);
AA75 = mulOp (tmp, AA75);
AA76 = mulOp (tmp, AA76);
AA77 = tmp;
AA78 = mulOp (tmp, AA78);
/* eliminate above and below current row */
tmp = AA07;
AA00 = fmnaOp (tmp, AA70, AA00);
AA01 = fmnaOp (tmp, AA71, AA01);
AA02 = fmnaOp (tmp, AA72, AA02);
AA03 = fmnaOp (tmp, AA73, AA03);
AA04 = fmnaOp (tmp, AA74, AA04);
AA05 = fmnaOp (tmp, AA75, AA05);
AA06 = fmnaOp (tmp, AA76, AA06);
AA07 = mulOp (negOp(tmp), AA77);
AA08 = fmnaOp (tmp, AA78, AA08);
tmp = AA17;
AA10 = fmnaOp (tmp, AA70, AA10);
AA11 = fmnaOp (tmp, AA71, AA11);
AA12 = fmnaOp (tmp, AA72, AA12);
AA13 = fmnaOp (tmp, AA73, AA13);
AA14 = fmnaOp (tmp, AA74, AA14);
AA15 = fmnaOp (tmp, AA75, AA15);
AA16 = fmnaOp (tmp, AA76, AA16);
AA17 = mulOp (negOp(tmp), AA77);
AA18 = fmnaOp (tmp, AA78, AA18);
tmp = AA27;
AA20 = fmnaOp (tmp, AA70, AA20);
AA21 = fmnaOp (tmp, AA71, AA21);
AA22 = fmnaOp (tmp, AA72, AA22);
AA23 = fmnaOp (tmp, AA73, AA23);
AA24 = fmnaOp (tmp, AA74, AA24);
AA25 = fmnaOp (tmp, AA75, AA25);
AA26 = fmnaOp (tmp, AA76, AA26);
AA27 = mulOp (negOp(tmp), AA77);
AA28 = fmnaOp (tmp, AA78, AA28);
tmp = AA37;
AA30 = fmnaOp (tmp, AA70, AA30);
AA31 = fmnaOp (tmp, AA71, AA31);
AA32 = fmnaOp (tmp, AA72, AA32);
AA33 = fmnaOp (tmp, AA73, AA33);
AA34 = fmnaOp (tmp, AA74, AA34);
AA35 = fmnaOp (tmp, AA75, AA35);
AA36 = fmnaOp (tmp, AA76, AA36);
AA37 = mulOp (negOp(tmp), AA77);
AA38 = fmnaOp (tmp, AA78, AA38);
tmp = AA47;
AA40 = fmnaOp (tmp, AA70, AA40);
AA41 = fmnaOp (tmp, AA71, AA41);
AA42 = fmnaOp (tmp, AA72, AA42);
AA43 = fmnaOp (tmp, AA73, AA43);
AA44 = fmnaOp (tmp, AA74, AA44);
AA45 = fmnaOp (tmp, AA75, AA45);
AA46 = fmnaOp (tmp, AA76, AA46);
AA47 = mulOp (negOp(tmp), AA77);
AA48 = fmnaOp (tmp, AA78, AA48);
tmp = AA57;
AA50 = fmnaOp (tmp, AA70, AA50);
AA51 = fmnaOp (tmp, AA71, AA51);
AA52 = fmnaOp (tmp, AA72, AA52);
AA53 = fmnaOp (tmp, AA73, AA53);
AA54 = fmnaOp (tmp, AA74, AA54);
AA55 = fmnaOp (tmp, AA75, AA55);
AA56 = fmnaOp (tmp, AA76, AA56);
AA57 = mulOp (negOp(tmp), AA77);
AA58 = fmnaOp (tmp, AA78, AA58);
tmp = AA67;
AA60 = fmnaOp (tmp, AA70, AA60);
AA61 = fmnaOp (tmp, AA71, AA61);
AA62 = fmnaOp (tmp, AA72, AA62);
AA63 = fmnaOp (tmp, AA73, AA63);
AA64 = fmnaOp (tmp, AA74, AA64);
AA65 = fmnaOp (tmp, AA75, AA65);
AA66 = fmnaOp (tmp, AA76, AA66);
AA67 = mulOp (negOp(tmp), AA77);
AA68 = fmnaOp (tmp, AA78, AA68);
tmp = AA87;
AA80 = fmnaOp (tmp, AA70, AA80);
AA81 = fmnaOp (tmp, AA71, AA81);
AA82 = fmnaOp (tmp, AA72, AA82);
AA83 = fmnaOp (tmp, AA73, AA83);
AA84 = fmnaOp (tmp, AA74, AA84);
AA85 = fmnaOp (tmp, AA75, AA85);
AA86 = fmnaOp (tmp, AA76, AA86);
AA87 = mulOp (negOp(tmp), AA77);
AA88 = fmnaOp (tmp, AA78, AA88);
/****************** iteration 8 ****************/
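        /* note: as in the smaller kernels, the final iteration performs no pivot
           search; row 8 is the last row of the 9x9 system, so there is no row
           below it to swap with */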
/* scale current row */
tmp = rcpOp (AA88);
icol8 = perm8;
AA80 = mulOp (tmp, AA80);
AA81 = mulOp (tmp, AA81);
AA82 = mulOp (tmp, AA82);
AA83 = mulOp (tmp, AA83);
AA84 = mulOp (tmp, AA84);
AA85 = mulOp (tmp, AA85);
AA86 = mulOp (tmp, AA86);
AA87 = mulOp (tmp, AA87);
AA88 = tmp;
/* eliminate above and below current row */
tmp = AA08;
AA00 = fmnaOp (tmp, AA80, AA00);
AA01 = fmnaOp (tmp, AA81, AA01);
AA02 = fmnaOp (tmp, AA82, AA02);
AA03 = fmnaOp (tmp, AA83, AA03);
AA04 = fmnaOp (tmp, AA84, AA04);
AA05 = fmnaOp (tmp, AA85, AA05);
AA06 = fmnaOp (tmp, AA86, AA06);
AA07 = fmnaOp (tmp, AA87, AA07);
AA08 = mulOp (negOp(tmp), AA88);
tmp = AA18;
AA10 = fmnaOp (tmp, AA80, AA10);
AA11 = fmnaOp (tmp, AA81, AA11);
AA12 = fmnaOp (tmp, AA82, AA12);
AA13 = fmnaOp (tmp, AA83, AA13);
AA14 = fmnaOp (tmp, AA84, AA14);
AA15 = fmnaOp (tmp, AA85, AA15);
AA16 = fmnaOp (tmp, AA86, AA16);
AA17 = fmnaOp (tmp, AA87, AA17);
AA18 = mulOp (negOp(tmp), AA88);
tmp = AA28;
AA20 = fmnaOp (tmp, AA80, AA20);
AA21 = fmnaOp (tmp, AA81, AA21);
AA22 = fmnaOp (tmp, AA82, AA22);
AA23 = fmnaOp (tmp, AA83, AA23);
AA24 = fmnaOp (tmp, AA84, AA24);
AA25 = fmnaOp (tmp, AA85, AA25);
AA26 = fmnaOp (tmp, AA86, AA26);
AA27 = fmnaOp (tmp, AA87, AA27);
AA28 = mulOp (negOp(tmp), AA88);
tmp = AA38;
AA30 = fmnaOp (tmp, AA80, AA30);
AA31 = fmnaOp (tmp, AA81, AA31);
AA32 = fmnaOp (tmp, AA82, AA32);
AA33 = fmnaOp (tmp, AA83, AA33);
AA34 = fmnaOp (tmp, AA84, AA34);
AA35 = fmnaOp (tmp, AA85, AA35);
AA36 = fmnaOp (tmp, AA86, AA36);
AA37 = fmnaOp (tmp, AA87, AA37);
AA38 = mulOp (negOp(tmp), AA88);
tmp = AA48;
AA40 = fmnaOp (tmp, AA80, AA40);
AA41 = fmnaOp (tmp, AA81, AA41);
AA42 = fmnaOp (tmp, AA82, AA42);
AA43 = fmnaOp (tmp, AA83, AA43);
AA44 = fmnaOp (tmp, AA84, AA44);
AA45 = fmnaOp (tmp, AA85, AA45);
AA46 = fmnaOp (tmp, AA86, AA46);
AA47 = fmnaOp (tmp, AA87, AA47);
AA48 = mulOp (negOp(tmp), AA88);
tmp = AA58;
AA50 = fmnaOp (tmp, AA80, AA50);
AA51 = fmnaOp (tmp, AA81, AA51);
AA52 = fmnaOp (tmp, AA82, AA52);
AA53 = fmnaOp (tmp, AA83, AA53);
AA54 = fmnaOp (tmp, AA84, AA54);
AA55 = fmnaOp (tmp, AA85, AA55);
AA56 = fmnaOp (tmp, AA86, AA56);
AA57 = fmnaOp (tmp, AA87, AA57);
AA58 = mulOp (negOp(tmp), AA88);
tmp = AA68;
AA60 = fmnaOp (tmp, AA80, AA60);
AA61 = fmnaOp (tmp, AA81, AA61);
AA62 = fmnaOp (tmp, AA82, AA62);
AA63 = fmnaOp (tmp, AA83, AA63);
AA64 = fmnaOp (tmp, AA84, AA64);
AA65 = fmnaOp (tmp, AA85, AA65);
AA66 = fmnaOp (tmp, AA86, AA66);
AA67 = fmnaOp (tmp, AA87, AA67);
AA68 = mulOp (negOp(tmp), AA88);
tmp = AA78;
AA70 = fmnaOp (tmp, AA80, AA70);
AA71 = fmnaOp (tmp, AA81, AA71);
AA72 = fmnaOp (tmp, AA82, AA72);
AA73 = fmnaOp (tmp, AA83, AA73);
AA74 = fmnaOp (tmp, AA84, AA74);
AA75 = fmnaOp (tmp, AA85, AA75);
AA76 = fmnaOp (tmp, AA86, AA76);
AA77 = fmnaOp (tmp, AA87, AA77);
AA78 = mulOp (negOp(tmp), AA88);
/* sort columns into the correct order */
Ainv(0,icol0) = AA00;
Ainv(1,icol0) = AA10;
Ainv(2,icol0) = AA20;
Ainv(3,icol0) = AA30;
Ainv(4,icol0) = AA40;
Ainv(5,icol0) = AA50;
Ainv(6,icol0) = AA60;
Ainv(7,icol0) = AA70;
Ainv(8,icol0) = AA80;
Ainv(0,icol1) = AA01;
Ainv(1,icol1) = AA11;
Ainv(2,icol1) = AA21;
Ainv(3,icol1) = AA31;
Ainv(4,icol1) = AA41;
Ainv(5,icol1) = AA51;
Ainv(6,icol1) = AA61;
Ainv(7,icol1) = AA71;
Ainv(8,icol1) = AA81;
Ainv(0,icol2) = AA02;
Ainv(1,icol2) = AA12;
Ainv(2,icol2) = AA22;
Ainv(3,icol2) = AA32;
Ainv(4,icol2) = AA42;
Ainv(5,icol2) = AA52;
Ainv(6,icol2) = AA62;
Ainv(7,icol2) = AA72;
Ainv(8,icol2) = AA82;
Ainv(0,icol3) = AA03;
Ainv(1,icol3) = AA13;
Ainv(2,icol3) = AA23;
Ainv(3,icol3) = AA33;
Ainv(4,icol3) = AA43;
Ainv(5,icol3) = AA53;
Ainv(6,icol3) = AA63;
Ainv(7,icol3) = AA73;
Ainv(8,icol3) = AA83;
Ainv(0,icol4) = AA04;
Ainv(1,icol4) = AA14;
Ainv(2,icol4) = AA24;
Ainv(3,icol4) = AA34;
Ainv(4,icol4) = AA44;
Ainv(5,icol4) = AA54;
Ainv(6,icol4) = AA64;
Ainv(7,icol4) = AA74;
Ainv(8,icol4) = AA84;
Ainv(0,icol5) = AA05;
Ainv(1,icol5) = AA15;
Ainv(2,icol5) = AA25;
Ainv(3,icol5) = AA35;
Ainv(4,icol5) = AA45;
Ainv(5,icol5) = AA55;
Ainv(6,icol5) = AA65;
Ainv(7,icol5) = AA75;
Ainv(8,icol5) = AA85;
Ainv(0,icol6) = AA06;
Ainv(1,icol6) = AA16;
Ainv(2,icol6) = AA26;
Ainv(3,icol6) = AA36;
Ainv(4,icol6) = AA46;
Ainv(5,icol6) = AA56;
Ainv(6,icol6) = AA66;
Ainv(7,icol6) = AA76;
Ainv(8,icol6) = AA86;
Ainv(0,icol7) = AA07;
Ainv(1,icol7) = AA17;
Ainv(2,icol7) = AA27;
Ainv(3,icol7) = AA37;
Ainv(4,icol7) = AA47;
Ainv(5,icol7) = AA57;
Ainv(6,icol7) = AA67;
Ainv(7,icol7) = AA77;
Ainv(8,icol7) = AA87;
Ainv(0,icol8) = AA08;
Ainv(1,icol8) = AA18;
Ainv(2,icol8) = AA28;
Ainv(3,icol8) = AA38;
Ainv(4,icol8) = AA48;
Ainv(5,icol8) = AA58;
Ainv(6,icol8) = AA68;
Ainv(7,icol8) = AA78;
Ainv(8,icol8) = AA88;
}
}
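
/*
 * A minimal host-side launch sketch for these per-thread kernels (illustrative
 * only: the block size, the buffer names d_A/d_Ainv, and the arch constant used
 * for the second template argument are assumptions, not something this file
 * prescribes):
 *
 *   // Invert `batch` contiguous, column-major 9x9 float matrices; one thread
 *   // inverts one matrix, so the grid only has to cover `batch` threads.
 *   dim3 block (128);
 *   dim3 grid ((batch + block.x - 1) / block.x);
 *   matinv_9x9_matrix_per_thread<float, ARCH_SM20><<<grid, block>>> (d_A, d_Ainv, batch);
 *
 * Threads whose index is >= batch fall through the `if (thrdNum < batch)` guard
 * and do no work, so rounding the grid size up is safe.
 */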
template<typename T, int arch>
__global__ void matinv_10x10_matrix_per_thread (const T *A, T *Ainv, int batch)
{
    /* This is a hack. The instantiation of this template function fails when
       arch = ARCH_SM13 and T = cuDoubleComplex, since the generated code needs
       more than 16KB of local memory. Since we don't need an instance of this
       template function on either sm_13 or sm_20, simply compile out all code
       in the function when T = cuDoubleComplex.
    */
if (!isDoubleComplex<T>()) {
const int blkNum = blockIdx.y * gridDim.x + blockIdx.x;
const int thrdNum = blkNum * blockDim.x + threadIdx.x;
const int N = 10;
int perm0, perm1, perm2, perm3, perm4, perm5, perm6, perm7, perm8, perm9;
int icol0, icol1, icol2, icol3, icol4, icol5, icol6, icol7, icol8, icol9;
T AA00, AA01, AA02, AA03, AA04, AA05, AA06, AA07, AA08, AA09;
T AA10, AA11, AA12, AA13, AA14, AA15, AA16, AA17, AA18, AA19;
T AA20, AA21, AA22, AA23, AA24, AA25, AA26, AA27, AA28, AA29;
T AA30, AA31, AA32, AA33, AA34, AA35, AA36, AA37, AA38, AA39;
T AA40, AA41, AA42, AA43, AA44, AA45, AA46, AA47, AA48, AA49;
T AA50, AA51, AA52, AA53, AA54, AA55, AA56, AA57, AA58, AA59;
T AA60, AA61, AA62, AA63, AA64, AA65, AA66, AA67, AA68, AA69;
T AA70, AA71, AA72, AA73, AA74, AA75, AA76, AA77, AA78, AA79;
T AA80, AA81, AA82, AA83, AA84, AA85, AA86, AA87, AA88, AA89;
T AA90, AA91, AA92, AA93, AA94, AA95, AA96, AA97, AA98, AA99;
T tmp;
#if USE_PIVOTING
typename config<T,arch>::absValType t;
typename config<T,arch>::absValType p;
int i, pvt;
#endif
A += thrdNum * N * N;
Ainv += thrdNum * N * N;
if (thrdNum < batch) {
AA00 = A[0];
AA10 = A[1];
AA20 = A[2];
AA30 = A[3];
AA40 = A[4];
AA50 = A[5];
AA60 = A[6];
AA70 = A[7];
AA80 = A[8];
AA90 = A[9];
AA01 = A[10];
AA11 = A[11];
AA21 = A[12];
AA31 = A[13];
AA41 = A[14];
AA51 = A[15];
AA61 = A[16];
AA71 = A[17];
AA81 = A[18];
AA91 = A[19];
AA02 = A[20];
AA12 = A[21];
AA22 = A[22];
AA32 = A[23];
AA42 = A[24];
AA52 = A[25];
AA62 = A[26];
AA72 = A[27];
AA82 = A[28];
AA92 = A[29];
AA03 = A[30];
AA13 = A[31];
AA23 = A[32];
AA33 = A[33];
AA43 = A[34];
AA53 = A[35];
AA63 = A[36];
AA73 = A[37];
AA83 = A[38];
AA93 = A[39];
AA04 = A[40];
AA14 = A[41];
AA24 = A[42];
AA34 = A[43];
AA44 = A[44];
AA54 = A[45];
AA64 = A[46];
AA74 = A[47];
AA84 = A[48];
AA94 = A[49];
AA05 = A[50];
AA15 = A[51];
AA25 = A[52];
AA35 = A[53];
AA45 = A[54];
AA55 = A[55];
AA65 = A[56];
AA75 = A[57];
AA85 = A[58];
AA95 = A[59];
AA06 = A[60];
AA16 = A[61];
AA26 = A[62];
AA36 = A[63];
AA46 = A[64];
AA56 = A[65];
AA66 = A[66];
AA76 = A[67];
AA86 = A[68];
AA96 = A[69];
AA07 = A[70];
AA17 = A[71];
AA27 = A[72];
AA37 = A[73];
AA47 = A[74];
AA57 = A[75];
AA67 = A[76];
AA77 = A[77];
AA87 = A[78];
AA97 = A[79];
AA08 = A[80];
AA18 = A[81];
AA28 = A[82];
AA38 = A[83];
AA48 = A[84];
AA58 = A[85];
AA68 = A[86];
AA78 = A[87];
AA88 = A[88];
AA98 = A[89];
AA09 = A[90];
AA19 = A[91];
AA29 = A[92];
AA39 = A[93];
AA49 = A[94];
AA59 = A[95];
AA69 = A[96];
AA79 = A[97];
AA89 = A[98];
AA99 = A[99];
perm0 = 0;
perm1 = 1;
perm2 = 2;
perm3 = 3;
perm4 = 4;
perm5 = 5;
perm6 = 6;
perm7 = 7;
perm8 = 8;
perm9 = 9;
/****************** iteration 0 ***********/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA00);
pvt = 0;
t = absOp (AA10);
if (t > p) { p = t; pvt = 1; }
t = absOp (AA20);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA30);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA40);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA50);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA60);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA70);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA80);
if (t > p) { p = t; pvt = 8; }
t = absOp (AA90);
if (t > p) { p = t; pvt = 9; }
/* swap pivot row with row 0 */
if (pvt == 1) {
tmp = AA00; AA00 = AA10; AA10 = tmp;
tmp = AA01; AA01 = AA11; AA11 = tmp;
tmp = AA02; AA02 = AA12; AA12 = tmp;
tmp = AA03; AA03 = AA13; AA13 = tmp;
tmp = AA04; AA04 = AA14; AA14 = tmp;
tmp = AA05; AA05 = AA15; AA15 = tmp;
tmp = AA06; AA06 = AA16; AA16 = tmp;
tmp = AA07; AA07 = AA17; AA17 = tmp;
tmp = AA08; AA08 = AA18; AA18 = tmp;
tmp = AA09; AA09 = AA19; AA19 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm1; perm1 = i;
}
if (pvt == 2) {
tmp = AA00; AA00 = AA20; AA20 = tmp;
tmp = AA01; AA01 = AA21; AA21 = tmp;
tmp = AA02; AA02 = AA22; AA22 = tmp;
tmp = AA03; AA03 = AA23; AA23 = tmp;
tmp = AA04; AA04 = AA24; AA24 = tmp;
tmp = AA05; AA05 = AA25; AA25 = tmp;
tmp = AA06; AA06 = AA26; AA26 = tmp;
tmp = AA07; AA07 = AA27; AA27 = tmp;
tmp = AA08; AA08 = AA28; AA28 = tmp;
tmp = AA09; AA09 = AA29; AA29 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA00; AA00 = AA30; AA30 = tmp;
tmp = AA01; AA01 = AA31; AA31 = tmp;
tmp = AA02; AA02 = AA32; AA32 = tmp;
tmp = AA03; AA03 = AA33; AA33 = tmp;
tmp = AA04; AA04 = AA34; AA34 = tmp;
tmp = AA05; AA05 = AA35; AA35 = tmp;
tmp = AA06; AA06 = AA36; AA36 = tmp;
tmp = AA07; AA07 = AA37; AA37 = tmp;
tmp = AA08; AA08 = AA38; AA38 = tmp;
tmp = AA09; AA09 = AA39; AA39 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA00; AA00 = AA40; AA40 = tmp;
tmp = AA01; AA01 = AA41; AA41 = tmp;
tmp = AA02; AA02 = AA42; AA42 = tmp;
tmp = AA03; AA03 = AA43; AA43 = tmp;
tmp = AA04; AA04 = AA44; AA44 = tmp;
tmp = AA05; AA05 = AA45; AA45 = tmp;
tmp = AA06; AA06 = AA46; AA46 = tmp;
tmp = AA07; AA07 = AA47; AA47 = tmp;
tmp = AA08; AA08 = AA48; AA48 = tmp;
tmp = AA09; AA09 = AA49; AA49 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA00; AA00 = AA50; AA50 = tmp;
tmp = AA01; AA01 = AA51; AA51 = tmp;
tmp = AA02; AA02 = AA52; AA52 = tmp;
tmp = AA03; AA03 = AA53; AA53 = tmp;
tmp = AA04; AA04 = AA54; AA54 = tmp;
tmp = AA05; AA05 = AA55; AA55 = tmp;
tmp = AA06; AA06 = AA56; AA56 = tmp;
tmp = AA07; AA07 = AA57; AA57 = tmp;
tmp = AA08; AA08 = AA58; AA58 = tmp;
tmp = AA09; AA09 = AA59; AA59 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA00; AA00 = AA60; AA60 = tmp;
tmp = AA01; AA01 = AA61; AA61 = tmp;
tmp = AA02; AA02 = AA62; AA62 = tmp;
tmp = AA03; AA03 = AA63; AA63 = tmp;
tmp = AA04; AA04 = AA64; AA64 = tmp;
tmp = AA05; AA05 = AA65; AA65 = tmp;
tmp = AA06; AA06 = AA66; AA66 = tmp;
tmp = AA07; AA07 = AA67; AA67 = tmp;
tmp = AA08; AA08 = AA68; AA68 = tmp;
tmp = AA09; AA09 = AA69; AA69 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA00; AA00 = AA70; AA70 = tmp;
tmp = AA01; AA01 = AA71; AA71 = tmp;
tmp = AA02; AA02 = AA72; AA72 = tmp;
tmp = AA03; AA03 = AA73; AA73 = tmp;
tmp = AA04; AA04 = AA74; AA74 = tmp;
tmp = AA05; AA05 = AA75; AA75 = tmp;
tmp = AA06; AA06 = AA76; AA76 = tmp;
tmp = AA07; AA07 = AA77; AA77 = tmp;
tmp = AA08; AA08 = AA78; AA78 = tmp;
tmp = AA09; AA09 = AA79; AA79 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA00; AA00 = AA80; AA80 = tmp;
tmp = AA01; AA01 = AA81; AA81 = tmp;
tmp = AA02; AA02 = AA82; AA82 = tmp;
tmp = AA03; AA03 = AA83; AA83 = tmp;
tmp = AA04; AA04 = AA84; AA84 = tmp;
tmp = AA05; AA05 = AA85; AA85 = tmp;
tmp = AA06; AA06 = AA86; AA86 = tmp;
tmp = AA07; AA07 = AA87; AA87 = tmp;
tmp = AA08; AA08 = AA88; AA88 = tmp;
tmp = AA09; AA09 = AA89; AA89 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm8; perm8 = i;
}
if (pvt == 9) {
tmp = AA00; AA00 = AA90; AA90 = tmp;
tmp = AA01; AA01 = AA91; AA91 = tmp;
tmp = AA02; AA02 = AA92; AA92 = tmp;
tmp = AA03; AA03 = AA93; AA93 = tmp;
tmp = AA04; AA04 = AA94; AA94 = tmp;
tmp = AA05; AA05 = AA95; AA95 = tmp;
tmp = AA06; AA06 = AA96; AA96 = tmp;
tmp = AA07; AA07 = AA97; AA97 = tmp;
tmp = AA08; AA08 = AA98; AA98 = tmp;
tmp = AA09; AA09 = AA99; AA99 = tmp;
/* update permutation vector based on row swap */
i = perm0; perm0 = perm9; perm9 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA00);
icol0 = perm0;
AA00 = tmp;
AA01 = mulOp (tmp, AA01);
AA02 = mulOp (tmp, AA02);
AA03 = mulOp (tmp, AA03);
AA04 = mulOp (tmp, AA04);
AA05 = mulOp (tmp, AA05);
AA06 = mulOp (tmp, AA06);
AA07 = mulOp (tmp, AA07);
AA08 = mulOp (tmp, AA08);
AA09 = mulOp (tmp, AA09);
/* eliminate above and below current row */
tmp = AA10;
AA10 = mulOp (negOp(tmp), AA00);
AA11 = fmnaOp (tmp, AA01, AA11);
AA12 = fmnaOp (tmp, AA02, AA12);
AA13 = fmnaOp (tmp, AA03, AA13);
AA14 = fmnaOp (tmp, AA04, AA14);
AA15 = fmnaOp (tmp, AA05, AA15);
AA16 = fmnaOp (tmp, AA06, AA16);
AA17 = fmnaOp (tmp, AA07, AA17);
AA18 = fmnaOp (tmp, AA08, AA18);
AA19 = fmnaOp (tmp, AA09, AA19);
tmp = AA20;
AA20 = mulOp (negOp(tmp), AA00);
AA21 = fmnaOp (tmp, AA01, AA21);
AA22 = fmnaOp (tmp, AA02, AA22);
AA23 = fmnaOp (tmp, AA03, AA23);
AA24 = fmnaOp (tmp, AA04, AA24);
AA25 = fmnaOp (tmp, AA05, AA25);
AA26 = fmnaOp (tmp, AA06, AA26);
AA27 = fmnaOp (tmp, AA07, AA27);
AA28 = fmnaOp (tmp, AA08, AA28);
AA29 = fmnaOp (tmp, AA09, AA29);
tmp = AA30;
AA30 = mulOp (negOp(tmp), AA00);
AA31 = fmnaOp (tmp, AA01, AA31);
AA32 = fmnaOp (tmp, AA02, AA32);
AA33 = fmnaOp (tmp, AA03, AA33);
AA34 = fmnaOp (tmp, AA04, AA34);
AA35 = fmnaOp (tmp, AA05, AA35);
AA36 = fmnaOp (tmp, AA06, AA36);
AA37 = fmnaOp (tmp, AA07, AA37);
AA38 = fmnaOp (tmp, AA08, AA38);
AA39 = fmnaOp (tmp, AA09, AA39);
tmp = AA40;
AA40 = mulOp (negOp(tmp), AA00);
AA41 = fmnaOp (tmp, AA01, AA41);
AA42 = fmnaOp (tmp, AA02, AA42);
AA43 = fmnaOp (tmp, AA03, AA43);
AA44 = fmnaOp (tmp, AA04, AA44);
AA45 = fmnaOp (tmp, AA05, AA45);
AA46 = fmnaOp (tmp, AA06, AA46);
AA47 = fmnaOp (tmp, AA07, AA47);
AA48 = fmnaOp (tmp, AA08, AA48);
AA49 = fmnaOp (tmp, AA09, AA49);
tmp = AA50;
AA50 = mulOp (negOp(tmp), AA00);
AA51 = fmnaOp (tmp, AA01, AA51);
AA52 = fmnaOp (tmp, AA02, AA52);
AA53 = fmnaOp (tmp, AA03, AA53);
AA54 = fmnaOp (tmp, AA04, AA54);
AA55 = fmnaOp (tmp, AA05, AA55);
AA56 = fmnaOp (tmp, AA06, AA56);
AA57 = fmnaOp (tmp, AA07, AA57);
AA58 = fmnaOp (tmp, AA08, AA58);
AA59 = fmnaOp (tmp, AA09, AA59);
tmp = AA60;
AA60 = mulOp (negOp(tmp), AA00);
AA61 = fmnaOp (tmp, AA01, AA61);
AA62 = fmnaOp (tmp, AA02, AA62);
AA63 = fmnaOp (tmp, AA03, AA63);
AA64 = fmnaOp (tmp, AA04, AA64);
AA65 = fmnaOp (tmp, AA05, AA65);
AA66 = fmnaOp (tmp, AA06, AA66);
AA67 = fmnaOp (tmp, AA07, AA67);
AA68 = fmnaOp (tmp, AA08, AA68);
AA69 = fmnaOp (tmp, AA09, AA69);
tmp = AA70;
AA70 = mulOp (negOp(tmp), AA00);
AA71 = fmnaOp (tmp, AA01, AA71);
AA72 = fmnaOp (tmp, AA02, AA72);
AA73 = fmnaOp (tmp, AA03, AA73);
AA74 = fmnaOp (tmp, AA04, AA74);
AA75 = fmnaOp (tmp, AA05, AA75);
AA76 = fmnaOp (tmp, AA06, AA76);
AA77 = fmnaOp (tmp, AA07, AA77);
AA78 = fmnaOp (tmp, AA08, AA78);
AA79 = fmnaOp (tmp, AA09, AA79);
tmp = AA80;
AA80 = mulOp (negOp(tmp), AA00);
AA81 = fmnaOp (tmp, AA01, AA81);
AA82 = fmnaOp (tmp, AA02, AA82);
AA83 = fmnaOp (tmp, AA03, AA83);
AA84 = fmnaOp (tmp, AA04, AA84);
AA85 = fmnaOp (tmp, AA05, AA85);
AA86 = fmnaOp (tmp, AA06, AA86);
AA87 = fmnaOp (tmp, AA07, AA87);
AA88 = fmnaOp (tmp, AA08, AA88);
AA89 = fmnaOp (tmp, AA09, AA89);
tmp = AA90;
AA90 = mulOp (negOp(tmp), AA00);
AA91 = fmnaOp (tmp, AA01, AA91);
AA92 = fmnaOp (tmp, AA02, AA92);
AA93 = fmnaOp (tmp, AA03, AA93);
AA94 = fmnaOp (tmp, AA04, AA94);
AA95 = fmnaOp (tmp, AA05, AA95);
AA96 = fmnaOp (tmp, AA06, AA96);
AA97 = fmnaOp (tmp, AA07, AA97);
AA98 = fmnaOp (tmp, AA08, AA98);
AA99 = fmnaOp (tmp, AA09, AA99);
            /****************** iteration 1 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA11);
pvt = 1;
t = absOp (AA21);
if (t > p) { p = t; pvt = 2; }
t = absOp (AA31);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA41);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA51);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA61);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA71);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA81);
if (t > p) { p = t; pvt = 8; }
t = absOp (AA91);
if (t > p) { p = t; pvt = 9; }
/* swap pivot row with row 1 */
if (pvt == 2) {
tmp = AA10; AA10 = AA20; AA20 = tmp;
tmp = AA11; AA11 = AA21; AA21 = tmp;
tmp = AA12; AA12 = AA22; AA22 = tmp;
tmp = AA13; AA13 = AA23; AA23 = tmp;
tmp = AA14; AA14 = AA24; AA24 = tmp;
tmp = AA15; AA15 = AA25; AA25 = tmp;
tmp = AA16; AA16 = AA26; AA26 = tmp;
tmp = AA17; AA17 = AA27; AA27 = tmp;
tmp = AA18; AA18 = AA28; AA28 = tmp;
tmp = AA19; AA19 = AA29; AA29 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm2; perm2 = i;
}
if (pvt == 3) {
tmp = AA10; AA10 = AA30; AA30 = tmp;
tmp = AA11; AA11 = AA31; AA31 = tmp;
tmp = AA12; AA12 = AA32; AA32 = tmp;
tmp = AA13; AA13 = AA33; AA33 = tmp;
tmp = AA14; AA14 = AA34; AA34 = tmp;
tmp = AA15; AA15 = AA35; AA35 = tmp;
tmp = AA16; AA16 = AA36; AA36 = tmp;
tmp = AA17; AA17 = AA37; AA37 = tmp;
tmp = AA18; AA18 = AA38; AA38 = tmp;
tmp = AA19; AA19 = AA39; AA39 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA10; AA10 = AA40; AA40 = tmp;
tmp = AA11; AA11 = AA41; AA41 = tmp;
tmp = AA12; AA12 = AA42; AA42 = tmp;
tmp = AA13; AA13 = AA43; AA43 = tmp;
tmp = AA14; AA14 = AA44; AA44 = tmp;
tmp = AA15; AA15 = AA45; AA45 = tmp;
tmp = AA16; AA16 = AA46; AA46 = tmp;
tmp = AA17; AA17 = AA47; AA47 = tmp;
tmp = AA18; AA18 = AA48; AA48 = tmp;
tmp = AA19; AA19 = AA49; AA49 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA10; AA10 = AA50; AA50 = tmp;
tmp = AA11; AA11 = AA51; AA51 = tmp;
tmp = AA12; AA12 = AA52; AA52 = tmp;
tmp = AA13; AA13 = AA53; AA53 = tmp;
tmp = AA14; AA14 = AA54; AA54 = tmp;
tmp = AA15; AA15 = AA55; AA55 = tmp;
tmp = AA16; AA16 = AA56; AA56 = tmp;
tmp = AA17; AA17 = AA57; AA57 = tmp;
tmp = AA18; AA18 = AA58; AA58 = tmp;
tmp = AA19; AA19 = AA59; AA59 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA10; AA10 = AA60; AA60 = tmp;
tmp = AA11; AA11 = AA61; AA61 = tmp;
tmp = AA12; AA12 = AA62; AA62 = tmp;
tmp = AA13; AA13 = AA63; AA63 = tmp;
tmp = AA14; AA14 = AA64; AA64 = tmp;
tmp = AA15; AA15 = AA65; AA65 = tmp;
tmp = AA16; AA16 = AA66; AA66 = tmp;
tmp = AA17; AA17 = AA67; AA67 = tmp;
tmp = AA18; AA18 = AA68; AA68 = tmp;
tmp = AA19; AA19 = AA69; AA69 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA10; AA10 = AA70; AA70 = tmp;
tmp = AA11; AA11 = AA71; AA71 = tmp;
tmp = AA12; AA12 = AA72; AA72 = tmp;
tmp = AA13; AA13 = AA73; AA73 = tmp;
tmp = AA14; AA14 = AA74; AA74 = tmp;
tmp = AA15; AA15 = AA75; AA75 = tmp;
tmp = AA16; AA16 = AA76; AA76 = tmp;
tmp = AA17; AA17 = AA77; AA77 = tmp;
tmp = AA18; AA18 = AA78; AA78 = tmp;
tmp = AA19; AA19 = AA79; AA79 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA10; AA10 = AA80; AA80 = tmp;
tmp = AA11; AA11 = AA81; AA81 = tmp;
tmp = AA12; AA12 = AA82; AA82 = tmp;
tmp = AA13; AA13 = AA83; AA83 = tmp;
tmp = AA14; AA14 = AA84; AA84 = tmp;
tmp = AA15; AA15 = AA85; AA85 = tmp;
tmp = AA16; AA16 = AA86; AA86 = tmp;
tmp = AA17; AA17 = AA87; AA87 = tmp;
tmp = AA18; AA18 = AA88; AA88 = tmp;
tmp = AA19; AA19 = AA89; AA89 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm8; perm8 = i;
}
if (pvt == 9) {
tmp = AA10; AA10 = AA90; AA90 = tmp;
tmp = AA11; AA11 = AA91; AA91 = tmp;
tmp = AA12; AA12 = AA92; AA92 = tmp;
tmp = AA13; AA13 = AA93; AA93 = tmp;
tmp = AA14; AA14 = AA94; AA94 = tmp;
tmp = AA15; AA15 = AA95; AA95 = tmp;
tmp = AA16; AA16 = AA96; AA96 = tmp;
tmp = AA17; AA17 = AA97; AA97 = tmp;
tmp = AA18; AA18 = AA98; AA98 = tmp;
tmp = AA19; AA19 = AA99; AA99 = tmp;
/* update permutation vector based on row swap */
i = perm1; perm1 = perm9; perm9 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA11);
icol1 = perm1;
AA10 = mulOp (tmp, AA10);
AA11 = tmp;
AA12 = mulOp (tmp, AA12);
AA13 = mulOp (tmp, AA13);
AA14 = mulOp (tmp, AA14);
AA15 = mulOp (tmp, AA15);
AA16 = mulOp (tmp, AA16);
AA17 = mulOp (tmp, AA17);
AA18 = mulOp (tmp, AA18);
AA19 = mulOp (tmp, AA19);
/* eliminate above and below current row */
tmp = AA01;
AA00 = fmnaOp (tmp, AA10, AA00);
AA01 = mulOp (negOp(tmp), AA11);
AA02 = fmnaOp (tmp, AA12, AA02);
AA03 = fmnaOp (tmp, AA13, AA03);
AA04 = fmnaOp (tmp, AA14, AA04);
AA05 = fmnaOp (tmp, AA15, AA05);
AA06 = fmnaOp (tmp, AA16, AA06);
AA07 = fmnaOp (tmp, AA17, AA07);
AA08 = fmnaOp (tmp, AA18, AA08);
AA09 = fmnaOp (tmp, AA19, AA09);
tmp = AA21;
AA20 = fmnaOp (tmp, AA10, AA20);
AA21 = mulOp (negOp(tmp), AA11);
AA22 = fmnaOp (tmp, AA12, AA22);
AA23 = fmnaOp (tmp, AA13, AA23);
AA24 = fmnaOp (tmp, AA14, AA24);
AA25 = fmnaOp (tmp, AA15, AA25);
AA26 = fmnaOp (tmp, AA16, AA26);
AA27 = fmnaOp (tmp, AA17, AA27);
AA28 = fmnaOp (tmp, AA18, AA28);
AA29 = fmnaOp (tmp, AA19, AA29);
tmp = AA31;
AA30 = fmnaOp (tmp, AA10, AA30);
AA31 = mulOp (negOp(tmp), AA11);
AA32 = fmnaOp (tmp, AA12, AA32);
AA33 = fmnaOp (tmp, AA13, AA33);
AA34 = fmnaOp (tmp, AA14, AA34);
AA35 = fmnaOp (tmp, AA15, AA35);
AA36 = fmnaOp (tmp, AA16, AA36);
AA37 = fmnaOp (tmp, AA17, AA37);
AA38 = fmnaOp (tmp, AA18, AA38);
AA39 = fmnaOp (tmp, AA19, AA39);
tmp = AA41;
AA40 = fmnaOp (tmp, AA10, AA40);
AA41 = mulOp (negOp(tmp), AA11);
AA42 = fmnaOp (tmp, AA12, AA42);
AA43 = fmnaOp (tmp, AA13, AA43);
AA44 = fmnaOp (tmp, AA14, AA44);
AA45 = fmnaOp (tmp, AA15, AA45);
AA46 = fmnaOp (tmp, AA16, AA46);
AA47 = fmnaOp (tmp, AA17, AA47);
AA48 = fmnaOp (tmp, AA18, AA48);
AA49 = fmnaOp (tmp, AA19, AA49);
tmp = AA51;
AA50 = fmnaOp (tmp, AA10, AA50);
AA51 = mulOp (negOp(tmp), AA11);
AA52 = fmnaOp (tmp, AA12, AA52);
AA53 = fmnaOp (tmp, AA13, AA53);
AA54 = fmnaOp (tmp, AA14, AA54);
AA55 = fmnaOp (tmp, AA15, AA55);
AA56 = fmnaOp (tmp, AA16, AA56);
AA57 = fmnaOp (tmp, AA17, AA57);
AA58 = fmnaOp (tmp, AA18, AA58);
AA59 = fmnaOp (tmp, AA19, AA59);
tmp = AA61;
AA60 = fmnaOp (tmp, AA10, AA60);
AA61 = mulOp (negOp(tmp), AA11);
AA62 = fmnaOp (tmp, AA12, AA62);
AA63 = fmnaOp (tmp, AA13, AA63);
AA64 = fmnaOp (tmp, AA14, AA64);
AA65 = fmnaOp (tmp, AA15, AA65);
AA66 = fmnaOp (tmp, AA16, AA66);
AA67 = fmnaOp (tmp, AA17, AA67);
AA68 = fmnaOp (tmp, AA18, AA68);
AA69 = fmnaOp (tmp, AA19, AA69);
tmp = AA71;
AA70 = fmnaOp (tmp, AA10, AA70);
AA71 = mulOp (negOp(tmp), AA11);
AA72 = fmnaOp (tmp, AA12, AA72);
AA73 = fmnaOp (tmp, AA13, AA73);
AA74 = fmnaOp (tmp, AA14, AA74);
AA75 = fmnaOp (tmp, AA15, AA75);
AA76 = fmnaOp (tmp, AA16, AA76);
AA77 = fmnaOp (tmp, AA17, AA77);
AA78 = fmnaOp (tmp, AA18, AA78);
AA79 = fmnaOp (tmp, AA19, AA79);
tmp = AA81;
AA80 = fmnaOp (tmp, AA10, AA80);
AA81 = mulOp (negOp(tmp), AA11);
AA82 = fmnaOp (tmp, AA12, AA82);
AA83 = fmnaOp (tmp, AA13, AA83);
AA84 = fmnaOp (tmp, AA14, AA84);
AA85 = fmnaOp (tmp, AA15, AA85);
AA86 = fmnaOp (tmp, AA16, AA86);
AA87 = fmnaOp (tmp, AA17, AA87);
AA88 = fmnaOp (tmp, AA18, AA88);
AA89 = fmnaOp (tmp, AA19, AA89);
tmp = AA91;
AA90 = fmnaOp (tmp, AA10, AA90);
AA91 = mulOp (negOp(tmp), AA11);
AA92 = fmnaOp (tmp, AA12, AA92);
AA93 = fmnaOp (tmp, AA13, AA93);
AA94 = fmnaOp (tmp, AA14, AA94);
AA95 = fmnaOp (tmp, AA15, AA95);
AA96 = fmnaOp (tmp, AA16, AA96);
AA97 = fmnaOp (tmp, AA17, AA97);
AA98 = fmnaOp (tmp, AA18, AA98);
AA99 = fmnaOp (tmp, AA19, AA99);
/****************** iteration 2 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA22);
pvt = 2;
t = absOp (AA32);
if (t > p) { p = t; pvt = 3; }
t = absOp (AA42);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA52);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA62);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA72);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA82);
if (t > p) { p = t; pvt = 8; }
t = absOp (AA92);
if (t > p) { p = t; pvt = 9; }
/* swap pivot row with row 2 */
if (pvt == 3) {
tmp = AA20; AA20 = AA30; AA30 = tmp;
tmp = AA21; AA21 = AA31; AA31 = tmp;
tmp = AA22; AA22 = AA32; AA32 = tmp;
tmp = AA23; AA23 = AA33; AA33 = tmp;
tmp = AA24; AA24 = AA34; AA34 = tmp;
tmp = AA25; AA25 = AA35; AA35 = tmp;
tmp = AA26; AA26 = AA36; AA36 = tmp;
tmp = AA27; AA27 = AA37; AA37 = tmp;
tmp = AA28; AA28 = AA38; AA38 = tmp;
tmp = AA29; AA29 = AA39; AA39 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm3; perm3 = i;
}
if (pvt == 4) {
tmp = AA20; AA20 = AA40; AA40 = tmp;
tmp = AA21; AA21 = AA41; AA41 = tmp;
tmp = AA22; AA22 = AA42; AA42 = tmp;
tmp = AA23; AA23 = AA43; AA43 = tmp;
tmp = AA24; AA24 = AA44; AA44 = tmp;
tmp = AA25; AA25 = AA45; AA45 = tmp;
tmp = AA26; AA26 = AA46; AA46 = tmp;
tmp = AA27; AA27 = AA47; AA47 = tmp;
tmp = AA28; AA28 = AA48; AA48 = tmp;
tmp = AA29; AA29 = AA49; AA49 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA20; AA20 = AA50; AA50 = tmp;
tmp = AA21; AA21 = AA51; AA51 = tmp;
tmp = AA22; AA22 = AA52; AA52 = tmp;
tmp = AA23; AA23 = AA53; AA53 = tmp;
tmp = AA24; AA24 = AA54; AA54 = tmp;
tmp = AA25; AA25 = AA55; AA55 = tmp;
tmp = AA26; AA26 = AA56; AA56 = tmp;
tmp = AA27; AA27 = AA57; AA57 = tmp;
tmp = AA28; AA28 = AA58; AA58 = tmp;
tmp = AA29; AA29 = AA59; AA59 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA20; AA20 = AA60; AA60 = tmp;
tmp = AA21; AA21 = AA61; AA61 = tmp;
tmp = AA22; AA22 = AA62; AA62 = tmp;
tmp = AA23; AA23 = AA63; AA63 = tmp;
tmp = AA24; AA24 = AA64; AA64 = tmp;
tmp = AA25; AA25 = AA65; AA65 = tmp;
tmp = AA26; AA26 = AA66; AA66 = tmp;
tmp = AA27; AA27 = AA67; AA67 = tmp;
tmp = AA28; AA28 = AA68; AA68 = tmp;
tmp = AA29; AA29 = AA69; AA69 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA20; AA20 = AA70; AA70 = tmp;
tmp = AA21; AA21 = AA71; AA71 = tmp;
tmp = AA22; AA22 = AA72; AA72 = tmp;
tmp = AA23; AA23 = AA73; AA73 = tmp;
tmp = AA24; AA24 = AA74; AA74 = tmp;
tmp = AA25; AA25 = AA75; AA75 = tmp;
tmp = AA26; AA26 = AA76; AA76 = tmp;
tmp = AA27; AA27 = AA77; AA77 = tmp;
tmp = AA28; AA28 = AA78; AA78 = tmp;
tmp = AA29; AA29 = AA79; AA79 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA20; AA20 = AA80; AA80 = tmp;
tmp = AA21; AA21 = AA81; AA81 = tmp;
tmp = AA22; AA22 = AA82; AA82 = tmp;
tmp = AA23; AA23 = AA83; AA83 = tmp;
tmp = AA24; AA24 = AA84; AA84 = tmp;
tmp = AA25; AA25 = AA85; AA85 = tmp;
tmp = AA26; AA26 = AA86; AA86 = tmp;
tmp = AA27; AA27 = AA87; AA87 = tmp;
tmp = AA28; AA28 = AA88; AA88 = tmp;
tmp = AA29; AA29 = AA89; AA89 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm8; perm8 = i;
}
if (pvt == 9) {
tmp = AA20; AA20 = AA90; AA90 = tmp;
tmp = AA21; AA21 = AA91; AA91 = tmp;
tmp = AA22; AA22 = AA92; AA92 = tmp;
tmp = AA23; AA23 = AA93; AA93 = tmp;
tmp = AA24; AA24 = AA94; AA94 = tmp;
tmp = AA25; AA25 = AA95; AA95 = tmp;
tmp = AA26; AA26 = AA96; AA96 = tmp;
tmp = AA27; AA27 = AA97; AA97 = tmp;
tmp = AA28; AA28 = AA98; AA98 = tmp;
tmp = AA29; AA29 = AA99; AA99 = tmp;
/* update permutation vector based on row swap */
i = perm2; perm2 = perm9; perm9 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA22);
icol2 = perm2;
AA20 = mulOp (tmp, AA20);
AA21 = mulOp (tmp, AA21);
AA22 = tmp;
AA23 = mulOp (tmp, AA23);
AA24 = mulOp (tmp, AA24);
AA25 = mulOp (tmp, AA25);
AA26 = mulOp (tmp, AA26);
AA27 = mulOp (tmp, AA27);
AA28 = mulOp (tmp, AA28);
AA29 = mulOp (tmp, AA29);
/* eliminate above and below current row */
tmp = AA02;
AA00 = fmnaOp (tmp, AA20, AA00);
AA01 = fmnaOp (tmp, AA21, AA01);
AA02 = mulOp (negOp(tmp), AA22);
AA03 = fmnaOp (tmp, AA23, AA03);
AA04 = fmnaOp (tmp, AA24, AA04);
AA05 = fmnaOp (tmp, AA25, AA05);
AA06 = fmnaOp (tmp, AA26, AA06);
AA07 = fmnaOp (tmp, AA27, AA07);
AA08 = fmnaOp (tmp, AA28, AA08);
AA09 = fmnaOp (tmp, AA29, AA09);
tmp = AA12;
AA10 = fmnaOp (tmp, AA20, AA10);
AA11 = fmnaOp (tmp, AA21, AA11);
AA12 = mulOp (negOp(tmp), AA22);
AA13 = fmnaOp (tmp, AA23, AA13);
AA14 = fmnaOp (tmp, AA24, AA14);
AA15 = fmnaOp (tmp, AA25, AA15);
AA16 = fmnaOp (tmp, AA26, AA16);
AA17 = fmnaOp (tmp, AA27, AA17);
AA18 = fmnaOp (tmp, AA28, AA18);
AA19 = fmnaOp (tmp, AA29, AA19);
tmp = AA32;
AA30 = fmnaOp (tmp, AA20, AA30);
AA31 = fmnaOp (tmp, AA21, AA31);
AA32 = mulOp (negOp(tmp), AA22);
AA33 = fmnaOp (tmp, AA23, AA33);
AA34 = fmnaOp (tmp, AA24, AA34);
AA35 = fmnaOp (tmp, AA25, AA35);
AA36 = fmnaOp (tmp, AA26, AA36);
AA37 = fmnaOp (tmp, AA27, AA37);
AA38 = fmnaOp (tmp, AA28, AA38);
AA39 = fmnaOp (tmp, AA29, AA39);
tmp = AA42;
AA40 = fmnaOp (tmp, AA20, AA40);
AA41 = fmnaOp (tmp, AA21, AA41);
AA42 = mulOp (negOp(tmp), AA22);
AA43 = fmnaOp (tmp, AA23, AA43);
AA44 = fmnaOp (tmp, AA24, AA44);
AA45 = fmnaOp (tmp, AA25, AA45);
AA46 = fmnaOp (tmp, AA26, AA46);
AA47 = fmnaOp (tmp, AA27, AA47);
AA48 = fmnaOp (tmp, AA28, AA48);
AA49 = fmnaOp (tmp, AA29, AA49);
tmp = AA52;
AA50 = fmnaOp (tmp, AA20, AA50);
AA51 = fmnaOp (tmp, AA21, AA51);
AA52 = mulOp (negOp(tmp), AA22);
AA53 = fmnaOp (tmp, AA23, AA53);
AA54 = fmnaOp (tmp, AA24, AA54);
AA55 = fmnaOp (tmp, AA25, AA55);
AA56 = fmnaOp (tmp, AA26, AA56);
AA57 = fmnaOp (tmp, AA27, AA57);
AA58 = fmnaOp (tmp, AA28, AA58);
AA59 = fmnaOp (tmp, AA29, AA59);
tmp = AA62;
AA60 = fmnaOp (tmp, AA20, AA60);
AA61 = fmnaOp (tmp, AA21, AA61);
AA62 = mulOp (negOp(tmp), AA22);
AA63 = fmnaOp (tmp, AA23, AA63);
AA64 = fmnaOp (tmp, AA24, AA64);
AA65 = fmnaOp (tmp, AA25, AA65);
AA66 = fmnaOp (tmp, AA26, AA66);
AA67 = fmnaOp (tmp, AA27, AA67);
AA68 = fmnaOp (tmp, AA28, AA68);
AA69 = fmnaOp (tmp, AA29, AA69);
tmp = AA72;
AA70 = fmnaOp (tmp, AA20, AA70);
AA71 = fmnaOp (tmp, AA21, AA71);
AA72 = mulOp (negOp(tmp), AA22);
AA73 = fmnaOp (tmp, AA23, AA73);
AA74 = fmnaOp (tmp, AA24, AA74);
AA75 = fmnaOp (tmp, AA25, AA75);
AA76 = fmnaOp (tmp, AA26, AA76);
AA77 = fmnaOp (tmp, AA27, AA77);
AA78 = fmnaOp (tmp, AA28, AA78);
AA79 = fmnaOp (tmp, AA29, AA79);
tmp = AA82;
AA80 = fmnaOp (tmp, AA20, AA80);
AA81 = fmnaOp (tmp, AA21, AA81);
AA82 = mulOp (negOp(tmp), AA22);
AA83 = fmnaOp (tmp, AA23, AA83);
AA84 = fmnaOp (tmp, AA24, AA84);
AA85 = fmnaOp (tmp, AA25, AA85);
AA86 = fmnaOp (tmp, AA26, AA86);
AA87 = fmnaOp (tmp, AA27, AA87);
AA88 = fmnaOp (tmp, AA28, AA88);
AA89 = fmnaOp (tmp, AA29, AA89);
tmp = AA92;
AA90 = fmnaOp (tmp, AA20, AA90);
AA91 = fmnaOp (tmp, AA21, AA91);
AA92 = mulOp (negOp(tmp), AA22);
AA93 = fmnaOp (tmp, AA23, AA93);
AA94 = fmnaOp (tmp, AA24, AA94);
AA95 = fmnaOp (tmp, AA25, AA95);
AA96 = fmnaOp (tmp, AA26, AA96);
AA97 = fmnaOp (tmp, AA27, AA97);
AA98 = fmnaOp (tmp, AA28, AA98);
AA99 = fmnaOp (tmp, AA29, AA99);
/****************** iteration 3 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA33);
pvt = 3;
t = absOp (AA43);
if (t > p) { p = t; pvt = 4; }
t = absOp (AA53);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA63);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA73);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA83);
if (t > p) { p = t; pvt = 8; }
t = absOp (AA93);
if (t > p) { p = t; pvt = 9; }
/* swap pivot row with row 3 */
if (pvt == 4) {
tmp = AA30; AA30 = AA40; AA40 = tmp;
tmp = AA31; AA31 = AA41; AA41 = tmp;
tmp = AA32; AA32 = AA42; AA42 = tmp;
tmp = AA33; AA33 = AA43; AA43 = tmp;
tmp = AA34; AA34 = AA44; AA44 = tmp;
tmp = AA35; AA35 = AA45; AA45 = tmp;
tmp = AA36; AA36 = AA46; AA46 = tmp;
tmp = AA37; AA37 = AA47; AA47 = tmp;
tmp = AA38; AA38 = AA48; AA48 = tmp;
tmp = AA39; AA39 = AA49; AA49 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm4; perm4 = i;
}
if (pvt == 5) {
tmp = AA30; AA30 = AA50; AA50 = tmp;
tmp = AA31; AA31 = AA51; AA51 = tmp;
tmp = AA32; AA32 = AA52; AA52 = tmp;
tmp = AA33; AA33 = AA53; AA53 = tmp;
tmp = AA34; AA34 = AA54; AA54 = tmp;
tmp = AA35; AA35 = AA55; AA55 = tmp;
tmp = AA36; AA36 = AA56; AA56 = tmp;
tmp = AA37; AA37 = AA57; AA57 = tmp;
tmp = AA38; AA38 = AA58; AA58 = tmp;
tmp = AA39; AA39 = AA59; AA59 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA30; AA30 = AA60; AA60 = tmp;
tmp = AA31; AA31 = AA61; AA61 = tmp;
tmp = AA32; AA32 = AA62; AA62 = tmp;
tmp = AA33; AA33 = AA63; AA63 = tmp;
tmp = AA34; AA34 = AA64; AA64 = tmp;
tmp = AA35; AA35 = AA65; AA65 = tmp;
tmp = AA36; AA36 = AA66; AA66 = tmp;
tmp = AA37; AA37 = AA67; AA67 = tmp;
tmp = AA38; AA38 = AA68; AA68 = tmp;
tmp = AA39; AA39 = AA69; AA69 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA30; AA30 = AA70; AA70 = tmp;
tmp = AA31; AA31 = AA71; AA71 = tmp;
tmp = AA32; AA32 = AA72; AA72 = tmp;
tmp = AA33; AA33 = AA73; AA73 = tmp;
tmp = AA34; AA34 = AA74; AA74 = tmp;
tmp = AA35; AA35 = AA75; AA75 = tmp;
tmp = AA36; AA36 = AA76; AA76 = tmp;
tmp = AA37; AA37 = AA77; AA77 = tmp;
tmp = AA38; AA38 = AA78; AA78 = tmp;
tmp = AA39; AA39 = AA79; AA79 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA30; AA30 = AA80; AA80 = tmp;
tmp = AA31; AA31 = AA81; AA81 = tmp;
tmp = AA32; AA32 = AA82; AA82 = tmp;
tmp = AA33; AA33 = AA83; AA83 = tmp;
tmp = AA34; AA34 = AA84; AA84 = tmp;
tmp = AA35; AA35 = AA85; AA85 = tmp;
tmp = AA36; AA36 = AA86; AA86 = tmp;
tmp = AA37; AA37 = AA87; AA87 = tmp;
tmp = AA38; AA38 = AA88; AA88 = tmp;
tmp = AA39; AA39 = AA89; AA89 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm8; perm8 = i;
}
if (pvt == 9) {
tmp = AA30; AA30 = AA90; AA90 = tmp;
tmp = AA31; AA31 = AA91; AA91 = tmp;
tmp = AA32; AA32 = AA92; AA92 = tmp;
tmp = AA33; AA33 = AA93; AA93 = tmp;
tmp = AA34; AA34 = AA94; AA94 = tmp;
tmp = AA35; AA35 = AA95; AA95 = tmp;
tmp = AA36; AA36 = AA96; AA96 = tmp;
tmp = AA37; AA37 = AA97; AA97 = tmp;
tmp = AA38; AA38 = AA98; AA98 = tmp;
tmp = AA39; AA39 = AA99; AA99 = tmp;
/* update permutation vector based on row swap */
i = perm3; perm3 = perm9; perm9 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA33);
icol3 = perm3;
AA30 = mulOp (tmp, AA30);
AA31 = mulOp (tmp, AA31);
AA32 = mulOp (tmp, AA32);
AA33 = tmp;
AA34 = mulOp (tmp, AA34);
AA35 = mulOp (tmp, AA35);
AA36 = mulOp (tmp, AA36);
AA37 = mulOp (tmp, AA37);
AA38 = mulOp (tmp, AA38);
AA39 = mulOp (tmp, AA39);
/* eliminate above and below current row */
tmp = AA03;
AA00 = fmnaOp (tmp, AA30, AA00);
AA01 = fmnaOp (tmp, AA31, AA01);
AA02 = fmnaOp (tmp, AA32, AA02);
AA03 = mulOp (negOp(tmp), AA33);
AA04 = fmnaOp (tmp, AA34, AA04);
AA05 = fmnaOp (tmp, AA35, AA05);
AA06 = fmnaOp (tmp, AA36, AA06);
AA07 = fmnaOp (tmp, AA37, AA07);
AA08 = fmnaOp (tmp, AA38, AA08);
AA09 = fmnaOp (tmp, AA39, AA09);
tmp = AA13;
AA10 = fmnaOp (tmp, AA30, AA10);
AA11 = fmnaOp (tmp, AA31, AA11);
AA12 = fmnaOp (tmp, AA32, AA12);
AA13 = mulOp (negOp(tmp), AA33);
AA14 = fmnaOp (tmp, AA34, AA14);
AA15 = fmnaOp (tmp, AA35, AA15);
AA16 = fmnaOp (tmp, AA36, AA16);
AA17 = fmnaOp (tmp, AA37, AA17);
AA18 = fmnaOp (tmp, AA38, AA18);
AA19 = fmnaOp (tmp, AA39, AA19);
tmp = AA23;
AA20 = fmnaOp (tmp, AA30, AA20);
AA21 = fmnaOp (tmp, AA31, AA21);
AA22 = fmnaOp (tmp, AA32, AA22);
AA23 = mulOp (negOp(tmp), AA33);
AA24 = fmnaOp (tmp, AA34, AA24);
AA25 = fmnaOp (tmp, AA35, AA25);
AA26 = fmnaOp (tmp, AA36, AA26);
AA27 = fmnaOp (tmp, AA37, AA27);
AA28 = fmnaOp (tmp, AA38, AA28);
AA29 = fmnaOp (tmp, AA39, AA29);
tmp = AA43;
AA40 = fmnaOp (tmp, AA30, AA40);
AA41 = fmnaOp (tmp, AA31, AA41);
AA42 = fmnaOp (tmp, AA32, AA42);
AA43 = mulOp (negOp(tmp), AA33);
AA44 = fmnaOp (tmp, AA34, AA44);
AA45 = fmnaOp (tmp, AA35, AA45);
AA46 = fmnaOp (tmp, AA36, AA46);
AA47 = fmnaOp (tmp, AA37, AA47);
AA48 = fmnaOp (tmp, AA38, AA48);
AA49 = fmnaOp (tmp, AA39, AA49);
tmp = AA53;
AA50 = fmnaOp (tmp, AA30, AA50);
AA51 = fmnaOp (tmp, AA31, AA51);
AA52 = fmnaOp (tmp, AA32, AA52);
AA53 = mulOp (negOp(tmp), AA33);
AA54 = fmnaOp (tmp, AA34, AA54);
AA55 = fmnaOp (tmp, AA35, AA55);
AA56 = fmnaOp (tmp, AA36, AA56);
AA57 = fmnaOp (tmp, AA37, AA57);
AA58 = fmnaOp (tmp, AA38, AA58);
AA59 = fmnaOp (tmp, AA39, AA59);
tmp = AA63;
AA60 = fmnaOp (tmp, AA30, AA60);
AA61 = fmnaOp (tmp, AA31, AA61);
AA62 = fmnaOp (tmp, AA32, AA62);
AA63 = mulOp (negOp(tmp), AA33);
AA64 = fmnaOp (tmp, AA34, AA64);
AA65 = fmnaOp (tmp, AA35, AA65);
AA66 = fmnaOp (tmp, AA36, AA66);
AA67 = fmnaOp (tmp, AA37, AA67);
AA68 = fmnaOp (tmp, AA38, AA68);
AA69 = fmnaOp (tmp, AA39, AA69);
tmp = AA73;
AA70 = fmnaOp (tmp, AA30, AA70);
AA71 = fmnaOp (tmp, AA31, AA71);
AA72 = fmnaOp (tmp, AA32, AA72);
AA73 = mulOp (negOp(tmp), AA33);
AA74 = fmnaOp (tmp, AA34, AA74);
AA75 = fmnaOp (tmp, AA35, AA75);
AA76 = fmnaOp (tmp, AA36, AA76);
AA77 = fmnaOp (tmp, AA37, AA77);
AA78 = fmnaOp (tmp, AA38, AA78);
AA79 = fmnaOp (tmp, AA39, AA79);
tmp = AA83;
AA80 = fmnaOp (tmp, AA30, AA80);
AA81 = fmnaOp (tmp, AA31, AA81);
AA82 = fmnaOp (tmp, AA32, AA82);
AA83 = mulOp (negOp(tmp), AA33);
AA84 = fmnaOp (tmp, AA34, AA84);
AA85 = fmnaOp (tmp, AA35, AA85);
AA86 = fmnaOp (tmp, AA36, AA86);
AA87 = fmnaOp (tmp, AA37, AA87);
AA88 = fmnaOp (tmp, AA38, AA88);
AA89 = fmnaOp (tmp, AA39, AA89);
tmp = AA93;
AA90 = fmnaOp (tmp, AA30, AA90);
AA91 = fmnaOp (tmp, AA31, AA91);
AA92 = fmnaOp (tmp, AA32, AA92);
AA93 = mulOp (negOp(tmp), AA33);
AA94 = fmnaOp (tmp, AA34, AA94);
AA95 = fmnaOp (tmp, AA35, AA95);
AA96 = fmnaOp (tmp, AA36, AA96);
AA97 = fmnaOp (tmp, AA37, AA97);
AA98 = fmnaOp (tmp, AA38, AA98);
AA99 = fmnaOp (tmp, AA39, AA99);
/****************** iteration 4 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA44);
pvt = 4;
t = absOp (AA54);
if (t > p) { p = t; pvt = 5; }
t = absOp (AA64);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA74);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA84);
if (t > p) { p = t; pvt = 8; }
t = absOp (AA94);
if (t > p) { p = t; pvt = 9; }
/* swap pivot row with row 4 */
if (pvt == 5) {
tmp = AA40; AA40 = AA50; AA50 = tmp;
tmp = AA41; AA41 = AA51; AA51 = tmp;
tmp = AA42; AA42 = AA52; AA52 = tmp;
tmp = AA43; AA43 = AA53; AA53 = tmp;
tmp = AA44; AA44 = AA54; AA54 = tmp;
tmp = AA45; AA45 = AA55; AA55 = tmp;
tmp = AA46; AA46 = AA56; AA56 = tmp;
tmp = AA47; AA47 = AA57; AA57 = tmp;
tmp = AA48; AA48 = AA58; AA58 = tmp;
tmp = AA49; AA49 = AA59; AA59 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm5; perm5 = i;
}
if (pvt == 6) {
tmp = AA40; AA40 = AA60; AA60 = tmp;
tmp = AA41; AA41 = AA61; AA61 = tmp;
tmp = AA42; AA42 = AA62; AA62 = tmp;
tmp = AA43; AA43 = AA63; AA63 = tmp;
tmp = AA44; AA44 = AA64; AA64 = tmp;
tmp = AA45; AA45 = AA65; AA65 = tmp;
tmp = AA46; AA46 = AA66; AA66 = tmp;
tmp = AA47; AA47 = AA67; AA67 = tmp;
tmp = AA48; AA48 = AA68; AA68 = tmp;
tmp = AA49; AA49 = AA69; AA69 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA40; AA40 = AA70; AA70 = tmp;
tmp = AA41; AA41 = AA71; AA71 = tmp;
tmp = AA42; AA42 = AA72; AA72 = tmp;
tmp = AA43; AA43 = AA73; AA73 = tmp;
tmp = AA44; AA44 = AA74; AA74 = tmp;
tmp = AA45; AA45 = AA75; AA75 = tmp;
tmp = AA46; AA46 = AA76; AA76 = tmp;
tmp = AA47; AA47 = AA77; AA77 = tmp;
tmp = AA48; AA48 = AA78; AA78 = tmp;
tmp = AA49; AA49 = AA79; AA79 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA40; AA40 = AA80; AA80 = tmp;
tmp = AA41; AA41 = AA81; AA81 = tmp;
tmp = AA42; AA42 = AA82; AA82 = tmp;
tmp = AA43; AA43 = AA83; AA83 = tmp;
tmp = AA44; AA44 = AA84; AA84 = tmp;
tmp = AA45; AA45 = AA85; AA85 = tmp;
tmp = AA46; AA46 = AA86; AA86 = tmp;
tmp = AA47; AA47 = AA87; AA87 = tmp;
tmp = AA48; AA48 = AA88; AA88 = tmp;
tmp = AA49; AA49 = AA89; AA89 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm8; perm8 = i;
}
if (pvt == 9) {
tmp = AA40; AA40 = AA90; AA90 = tmp;
tmp = AA41; AA41 = AA91; AA91 = tmp;
tmp = AA42; AA42 = AA92; AA92 = tmp;
tmp = AA43; AA43 = AA93; AA93 = tmp;
tmp = AA44; AA44 = AA94; AA94 = tmp;
tmp = AA45; AA45 = AA95; AA95 = tmp;
tmp = AA46; AA46 = AA96; AA96 = tmp;
tmp = AA47; AA47 = AA97; AA97 = tmp;
tmp = AA48; AA48 = AA98; AA98 = tmp;
tmp = AA49; AA49 = AA99; AA99 = tmp;
/* update permutation vector based on row swap */
i = perm4; perm4 = perm9; perm9 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA44);
icol4 = perm4;
AA40 = mulOp (tmp, AA40);
AA41 = mulOp (tmp, AA41);
AA42 = mulOp (tmp, AA42);
AA43 = mulOp (tmp, AA43);
AA44 = tmp;
AA45 = mulOp (tmp, AA45);
AA46 = mulOp (tmp, AA46);
AA47 = mulOp (tmp, AA47);
AA48 = mulOp (tmp, AA48);
AA49 = mulOp (tmp, AA49);
/* eliminate above and below current row */
tmp = AA04;
AA00 = fmnaOp (tmp, AA40, AA00);
AA01 = fmnaOp (tmp, AA41, AA01);
AA02 = fmnaOp (tmp, AA42, AA02);
AA03 = fmnaOp (tmp, AA43, AA03);
AA04 = mulOp (negOp(tmp), AA44);
AA05 = fmnaOp (tmp, AA45, AA05);
AA06 = fmnaOp (tmp, AA46, AA06);
AA07 = fmnaOp (tmp, AA47, AA07);
AA08 = fmnaOp (tmp, AA48, AA08);
AA09 = fmnaOp (tmp, AA49, AA09);
tmp = AA14;
AA10 = fmnaOp (tmp, AA40, AA10);
AA11 = fmnaOp (tmp, AA41, AA11);
AA12 = fmnaOp (tmp, AA42, AA12);
AA13 = fmnaOp (tmp, AA43, AA13);
AA14 = mulOp (negOp(tmp), AA44);
AA15 = fmnaOp (tmp, AA45, AA15);
AA16 = fmnaOp (tmp, AA46, AA16);
AA17 = fmnaOp (tmp, AA47, AA17);
AA18 = fmnaOp (tmp, AA48, AA18);
AA19 = fmnaOp (tmp, AA49, AA19);
tmp = AA24;
AA20 = fmnaOp (tmp, AA40, AA20);
AA21 = fmnaOp (tmp, AA41, AA21);
AA22 = fmnaOp (tmp, AA42, AA22);
AA23 = fmnaOp (tmp, AA43, AA23);
AA24 = mulOp (negOp(tmp), AA44);
AA25 = fmnaOp (tmp, AA45, AA25);
AA26 = fmnaOp (tmp, AA46, AA26);
AA27 = fmnaOp (tmp, AA47, AA27);
AA28 = fmnaOp (tmp, AA48, AA28);
AA29 = fmnaOp (tmp, AA49, AA29);
tmp = AA34;
AA30 = fmnaOp (tmp, AA40, AA30);
AA31 = fmnaOp (tmp, AA41, AA31);
AA32 = fmnaOp (tmp, AA42, AA32);
AA33 = fmnaOp (tmp, AA43, AA33);
AA34 = mulOp (negOp(tmp), AA44);
AA35 = fmnaOp (tmp, AA45, AA35);
AA36 = fmnaOp (tmp, AA46, AA36);
AA37 = fmnaOp (tmp, AA47, AA37);
AA38 = fmnaOp (tmp, AA48, AA38);
AA39 = fmnaOp (tmp, AA49, AA39);
tmp = AA54;
AA50 = fmnaOp (tmp, AA40, AA50);
AA51 = fmnaOp (tmp, AA41, AA51);
AA52 = fmnaOp (tmp, AA42, AA52);
AA53 = fmnaOp (tmp, AA43, AA53);
AA54 = mulOp (negOp(tmp), AA44);
AA55 = fmnaOp (tmp, AA45, AA55);
AA56 = fmnaOp (tmp, AA46, AA56);
AA57 = fmnaOp (tmp, AA47, AA57);
AA58 = fmnaOp (tmp, AA48, AA58);
AA59 = fmnaOp (tmp, AA49, AA59);
tmp = AA64;
AA60 = fmnaOp (tmp, AA40, AA60);
AA61 = fmnaOp (tmp, AA41, AA61);
AA62 = fmnaOp (tmp, AA42, AA62);
AA63 = fmnaOp (tmp, AA43, AA63);
AA64 = mulOp (negOp(tmp), AA44);
AA65 = fmnaOp (tmp, AA45, AA65);
AA66 = fmnaOp (tmp, AA46, AA66);
AA67 = fmnaOp (tmp, AA47, AA67);
AA68 = fmnaOp (tmp, AA48, AA68);
AA69 = fmnaOp (tmp, AA49, AA69);
tmp = AA74;
AA70 = fmnaOp (tmp, AA40, AA70);
AA71 = fmnaOp (tmp, AA41, AA71);
AA72 = fmnaOp (tmp, AA42, AA72);
AA73 = fmnaOp (tmp, AA43, AA73);
AA74 = mulOp (negOp(tmp), AA44);
AA75 = fmnaOp (tmp, AA45, AA75);
AA76 = fmnaOp (tmp, AA46, AA76);
AA77 = fmnaOp (tmp, AA47, AA77);
AA78 = fmnaOp (tmp, AA48, AA78);
AA79 = fmnaOp (tmp, AA49, AA79);
tmp = AA84;
AA80 = fmnaOp (tmp, AA40, AA80);
AA81 = fmnaOp (tmp, AA41, AA81);
AA82 = fmnaOp (tmp, AA42, AA82);
AA83 = fmnaOp (tmp, AA43, AA83);
AA84 = mulOp (negOp(tmp), AA44);
AA85 = fmnaOp (tmp, AA45, AA85);
AA86 = fmnaOp (tmp, AA46, AA86);
AA87 = fmnaOp (tmp, AA47, AA87);
AA88 = fmnaOp (tmp, AA48, AA88);
AA89 = fmnaOp (tmp, AA49, AA89);
tmp = AA94;
AA90 = fmnaOp (tmp, AA40, AA90);
AA91 = fmnaOp (tmp, AA41, AA91);
AA92 = fmnaOp (tmp, AA42, AA92);
AA93 = fmnaOp (tmp, AA43, AA93);
AA94 = mulOp (negOp(tmp), AA44);
AA95 = fmnaOp (tmp, AA45, AA95);
AA96 = fmnaOp (tmp, AA46, AA96);
AA97 = fmnaOp (tmp, AA47, AA97);
AA98 = fmnaOp (tmp, AA48, AA98);
AA99 = fmnaOp (tmp, AA49, AA99);
/****************** iteration 5 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA55);
pvt = 5;
t = absOp (AA65);
if (t > p) { p = t; pvt = 6; }
t = absOp (AA75);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA85);
if (t > p) { p = t; pvt = 8; }
t = absOp (AA95);
if (t > p) { p = t; pvt = 9; }
/* swap pivot row with row 5 */
if (pvt == 6) {
tmp = AA50; AA50 = AA60; AA60 = tmp;
tmp = AA51; AA51 = AA61; AA61 = tmp;
tmp = AA52; AA52 = AA62; AA62 = tmp;
tmp = AA53; AA53 = AA63; AA63 = tmp;
tmp = AA54; AA54 = AA64; AA64 = tmp;
tmp = AA55; AA55 = AA65; AA65 = tmp;
tmp = AA56; AA56 = AA66; AA66 = tmp;
tmp = AA57; AA57 = AA67; AA67 = tmp;
tmp = AA58; AA58 = AA68; AA68 = tmp;
tmp = AA59; AA59 = AA69; AA69 = tmp;
/* update permutation vector based on row swap */
i = perm5; perm5 = perm6; perm6 = i;
}
if (pvt == 7) {
tmp = AA50; AA50 = AA70; AA70 = tmp;
tmp = AA51; AA51 = AA71; AA71 = tmp;
tmp = AA52; AA52 = AA72; AA72 = tmp;
tmp = AA53; AA53 = AA73; AA73 = tmp;
tmp = AA54; AA54 = AA74; AA74 = tmp;
tmp = AA55; AA55 = AA75; AA75 = tmp;
tmp = AA56; AA56 = AA76; AA76 = tmp;
tmp = AA57; AA57 = AA77; AA77 = tmp;
tmp = AA58; AA58 = AA78; AA78 = tmp;
tmp = AA59; AA59 = AA79; AA79 = tmp;
/* update permutation vector based on row swap */
i = perm5; perm5 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA50; AA50 = AA80; AA80 = tmp;
tmp = AA51; AA51 = AA81; AA81 = tmp;
tmp = AA52; AA52 = AA82; AA82 = tmp;
tmp = AA53; AA53 = AA83; AA83 = tmp;
tmp = AA54; AA54 = AA84; AA84 = tmp;
tmp = AA55; AA55 = AA85; AA85 = tmp;
tmp = AA56; AA56 = AA86; AA86 = tmp;
tmp = AA57; AA57 = AA87; AA87 = tmp;
tmp = AA58; AA58 = AA88; AA88 = tmp;
tmp = AA59; AA59 = AA89; AA89 = tmp;
/* update permutation vector based on row swap */
i = perm5; perm5 = perm8; perm8 = i;
}
if (pvt == 9) {
tmp = AA50; AA50 = AA90; AA90 = tmp;
tmp = AA51; AA51 = AA91; AA91 = tmp;
tmp = AA52; AA52 = AA92; AA92 = tmp;
tmp = AA53; AA53 = AA93; AA93 = tmp;
tmp = AA54; AA54 = AA94; AA94 = tmp;
tmp = AA55; AA55 = AA95; AA95 = tmp;
tmp = AA56; AA56 = AA96; AA96 = tmp;
tmp = AA57; AA57 = AA97; AA97 = tmp;
tmp = AA58; AA58 = AA98; AA98 = tmp;
tmp = AA59; AA59 = AA99; AA99 = tmp;
/* update permutation vector based on row swap */
i = perm5; perm5 = perm9; perm9 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA55);
icol5 = perm5;
AA50 = mulOp (tmp, AA50);
AA51 = mulOp (tmp, AA51);
AA52 = mulOp (tmp, AA52);
AA53 = mulOp (tmp, AA53);
AA54 = mulOp (tmp, AA54);
AA55 = tmp;
AA56 = mulOp (tmp, AA56);
AA57 = mulOp (tmp, AA57);
AA58 = mulOp (tmp, AA58);
AA59 = mulOp (tmp, AA59);
/* eliminate above and below current row */
tmp = AA05;
AA00 = fmnaOp (tmp, AA50, AA00);
AA01 = fmnaOp (tmp, AA51, AA01);
AA02 = fmnaOp (tmp, AA52, AA02);
AA03 = fmnaOp (tmp, AA53, AA03);
AA04 = fmnaOp (tmp, AA54, AA04);
AA05 = mulOp (negOp(tmp), AA55);
AA06 = fmnaOp (tmp, AA56, AA06);
AA07 = fmnaOp (tmp, AA57, AA07);
AA08 = fmnaOp (tmp, AA58, AA08);
AA09 = fmnaOp (tmp, AA59, AA09);
tmp = AA15;
AA10 = fmnaOp (tmp, AA50, AA10);
AA11 = fmnaOp (tmp, AA51, AA11);
AA12 = fmnaOp (tmp, AA52, AA12);
AA13 = fmnaOp (tmp, AA53, AA13);
AA14 = fmnaOp (tmp, AA54, AA14);
AA15 = mulOp (negOp(tmp), AA55);
AA16 = fmnaOp (tmp, AA56, AA16);
AA17 = fmnaOp (tmp, AA57, AA17);
AA18 = fmnaOp (tmp, AA58, AA18);
AA19 = fmnaOp (tmp, AA59, AA19);
tmp = AA25;
AA20 = fmnaOp (tmp, AA50, AA20);
AA21 = fmnaOp (tmp, AA51, AA21);
AA22 = fmnaOp (tmp, AA52, AA22);
AA23 = fmnaOp (tmp, AA53, AA23);
AA24 = fmnaOp (tmp, AA54, AA24);
AA25 = mulOp (negOp(tmp), AA55);
AA26 = fmnaOp (tmp, AA56, AA26);
AA27 = fmnaOp (tmp, AA57, AA27);
AA28 = fmnaOp (tmp, AA58, AA28);
AA29 = fmnaOp (tmp, AA59, AA29);
tmp = AA35;
AA30 = fmnaOp (tmp, AA50, AA30);
AA31 = fmnaOp (tmp, AA51, AA31);
AA32 = fmnaOp (tmp, AA52, AA32);
AA33 = fmnaOp (tmp, AA53, AA33);
AA34 = fmnaOp (tmp, AA54, AA34);
AA35 = mulOp (negOp(tmp), AA55);
AA36 = fmnaOp (tmp, AA56, AA36);
AA37 = fmnaOp (tmp, AA57, AA37);
AA38 = fmnaOp (tmp, AA58, AA38);
AA39 = fmnaOp (tmp, AA59, AA39);
tmp = AA45;
AA40 = fmnaOp (tmp, AA50, AA40);
AA41 = fmnaOp (tmp, AA51, AA41);
AA42 = fmnaOp (tmp, AA52, AA42);
AA43 = fmnaOp (tmp, AA53, AA43);
AA44 = fmnaOp (tmp, AA54, AA44);
AA45 = mulOp (negOp(tmp), AA55);
AA46 = fmnaOp (tmp, AA56, AA46);
AA47 = fmnaOp (tmp, AA57, AA47);
AA48 = fmnaOp (tmp, AA58, AA48);
AA49 = fmnaOp (tmp, AA59, AA49);
tmp = AA65;
AA60 = fmnaOp (tmp, AA50, AA60);
AA61 = fmnaOp (tmp, AA51, AA61);
AA62 = fmnaOp (tmp, AA52, AA62);
AA63 = fmnaOp (tmp, AA53, AA63);
AA64 = fmnaOp (tmp, AA54, AA64);
AA65 = mulOp (negOp(tmp), AA55);
AA66 = fmnaOp (tmp, AA56, AA66);
AA67 = fmnaOp (tmp, AA57, AA67);
AA68 = fmnaOp (tmp, AA58, AA68);
AA69 = fmnaOp (tmp, AA59, AA69);
tmp = AA75;
AA70 = fmnaOp (tmp, AA50, AA70);
AA71 = fmnaOp (tmp, AA51, AA71);
AA72 = fmnaOp (tmp, AA52, AA72);
AA73 = fmnaOp (tmp, AA53, AA73);
AA74 = fmnaOp (tmp, AA54, AA74);
AA75 = mulOp (negOp(tmp), AA55);
AA76 = fmnaOp (tmp, AA56, AA76);
AA77 = fmnaOp (tmp, AA57, AA77);
AA78 = fmnaOp (tmp, AA58, AA78);
AA79 = fmnaOp (tmp, AA59, AA79);
tmp = AA85;
AA80 = fmnaOp (tmp, AA50, AA80);
AA81 = fmnaOp (tmp, AA51, AA81);
AA82 = fmnaOp (tmp, AA52, AA82);
AA83 = fmnaOp (tmp, AA53, AA83);
AA84 = fmnaOp (tmp, AA54, AA84);
AA85 = mulOp (negOp(tmp), AA55);
AA86 = fmnaOp (tmp, AA56, AA86);
AA87 = fmnaOp (tmp, AA57, AA87);
AA88 = fmnaOp (tmp, AA58, AA88);
AA89 = fmnaOp (tmp, AA59, AA89);
tmp = AA95;
AA90 = fmnaOp (tmp, AA50, AA90);
AA91 = fmnaOp (tmp, AA51, AA91);
AA92 = fmnaOp (tmp, AA52, AA92);
AA93 = fmnaOp (tmp, AA53, AA93);
AA94 = fmnaOp (tmp, AA54, AA94);
AA95 = mulOp (negOp(tmp), AA55);
AA96 = fmnaOp (tmp, AA56, AA96);
AA97 = fmnaOp (tmp, AA57, AA97);
AA98 = fmnaOp (tmp, AA58, AA98);
AA99 = fmnaOp (tmp, AA59, AA99);
/****************** iteration 6 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA66);
pvt = 6;
t = absOp (AA76);
if (t > p) { p = t; pvt = 7; }
t = absOp (AA86);
if (t > p) { p = t; pvt = 8; }
t = absOp (AA96);
if (t > p) { p = t; pvt = 9; }
/* swap pivot row with row 6 */
if (pvt == 7) {
tmp = AA60; AA60 = AA70; AA70 = tmp;
tmp = AA61; AA61 = AA71; AA71 = tmp;
tmp = AA62; AA62 = AA72; AA72 = tmp;
tmp = AA63; AA63 = AA73; AA73 = tmp;
tmp = AA64; AA64 = AA74; AA74 = tmp;
tmp = AA65; AA65 = AA75; AA75 = tmp;
tmp = AA66; AA66 = AA76; AA76 = tmp;
tmp = AA67; AA67 = AA77; AA77 = tmp;
tmp = AA68; AA68 = AA78; AA78 = tmp;
tmp = AA69; AA69 = AA79; AA79 = tmp;
/* update permutation vector based on row swap */
i = perm6; perm6 = perm7; perm7 = i;
}
if (pvt == 8) {
tmp = AA60; AA60 = AA80; AA80 = tmp;
tmp = AA61; AA61 = AA81; AA81 = tmp;
tmp = AA62; AA62 = AA82; AA82 = tmp;
tmp = AA63; AA63 = AA83; AA83 = tmp;
tmp = AA64; AA64 = AA84; AA84 = tmp;
tmp = AA65; AA65 = AA85; AA85 = tmp;
tmp = AA66; AA66 = AA86; AA86 = tmp;
tmp = AA67; AA67 = AA87; AA87 = tmp;
tmp = AA68; AA68 = AA88; AA88 = tmp;
tmp = AA69; AA69 = AA89; AA89 = tmp;
/* update permutation vector based on row swap */
i = perm6; perm6 = perm8; perm8 = i;
}
if (pvt == 9) {
tmp = AA60; AA60 = AA90; AA90 = tmp;
tmp = AA61; AA61 = AA91; AA91 = tmp;
tmp = AA62; AA62 = AA92; AA92 = tmp;
tmp = AA63; AA63 = AA93; AA93 = tmp;
tmp = AA64; AA64 = AA94; AA94 = tmp;
tmp = AA65; AA65 = AA95; AA95 = tmp;
tmp = AA66; AA66 = AA96; AA96 = tmp;
tmp = AA67; AA67 = AA97; AA97 = tmp;
tmp = AA68; AA68 = AA98; AA98 = tmp;
tmp = AA69; AA69 = AA99; AA99 = tmp;
/* update permutation vector based on row swap */
i = perm6; perm6 = perm9; perm9 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA66);
icol6 = perm6;
AA60 = mulOp (tmp, AA60);
AA61 = mulOp (tmp, AA61);
AA62 = mulOp (tmp, AA62);
AA63 = mulOp (tmp, AA63);
AA64 = mulOp (tmp, AA64);
AA65 = mulOp (tmp, AA65);
AA66 = tmp;
AA67 = mulOp (tmp, AA67);
AA68 = mulOp (tmp, AA68);
AA69 = mulOp (tmp, AA69);
/* eliminate above and below current row */
tmp = AA06;
AA00 = fmnaOp (tmp, AA60, AA00);
AA01 = fmnaOp (tmp, AA61, AA01);
AA02 = fmnaOp (tmp, AA62, AA02);
AA03 = fmnaOp (tmp, AA63, AA03);
AA04 = fmnaOp (tmp, AA64, AA04);
AA05 = fmnaOp (tmp, AA65, AA05);
AA06 = mulOp (negOp(tmp), AA66);
AA07 = fmnaOp (tmp, AA67, AA07);
AA08 = fmnaOp (tmp, AA68, AA08);
AA09 = fmnaOp (tmp, AA69, AA09);
tmp = AA16;
AA10 = fmnaOp (tmp, AA60, AA10);
AA11 = fmnaOp (tmp, AA61, AA11);
AA12 = fmnaOp (tmp, AA62, AA12);
AA13 = fmnaOp (tmp, AA63, AA13);
AA14 = fmnaOp (tmp, AA64, AA14);
AA15 = fmnaOp (tmp, AA65, AA15);
AA16 = mulOp (negOp(tmp), AA66);
AA17 = fmnaOp (tmp, AA67, AA17);
AA18 = fmnaOp (tmp, AA68, AA18);
AA19 = fmnaOp (tmp, AA69, AA19);
tmp = AA26;
AA20 = fmnaOp (tmp, AA60, AA20);
AA21 = fmnaOp (tmp, AA61, AA21);
AA22 = fmnaOp (tmp, AA62, AA22);
AA23 = fmnaOp (tmp, AA63, AA23);
AA24 = fmnaOp (tmp, AA64, AA24);
AA25 = fmnaOp (tmp, AA65, AA25);
AA26 = mulOp (negOp(tmp), AA66);
AA27 = fmnaOp (tmp, AA67, AA27);
AA28 = fmnaOp (tmp, AA68, AA28);
AA29 = fmnaOp (tmp, AA69, AA29);
tmp = AA36;
AA30 = fmnaOp (tmp, AA60, AA30);
AA31 = fmnaOp (tmp, AA61, AA31);
AA32 = fmnaOp (tmp, AA62, AA32);
AA33 = fmnaOp (tmp, AA63, AA33);
AA34 = fmnaOp (tmp, AA64, AA34);
AA35 = fmnaOp (tmp, AA65, AA35);
AA36 = mulOp (negOp(tmp), AA66);
AA37 = fmnaOp (tmp, AA67, AA37);
AA38 = fmnaOp (tmp, AA68, AA38);
AA39 = fmnaOp (tmp, AA69, AA39);
tmp = AA46;
AA40 = fmnaOp (tmp, AA60, AA40);
AA41 = fmnaOp (tmp, AA61, AA41);
AA42 = fmnaOp (tmp, AA62, AA42);
AA43 = fmnaOp (tmp, AA63, AA43);
AA44 = fmnaOp (tmp, AA64, AA44);
AA45 = fmnaOp (tmp, AA65, AA45);
AA46 = mulOp (negOp(tmp), AA66);
AA47 = fmnaOp (tmp, AA67, AA47);
AA48 = fmnaOp (tmp, AA68, AA48);
AA49 = fmnaOp (tmp, AA69, AA49);
tmp = AA56;
AA50 = fmnaOp (tmp, AA60, AA50);
AA51 = fmnaOp (tmp, AA61, AA51);
AA52 = fmnaOp (tmp, AA62, AA52);
AA53 = fmnaOp (tmp, AA63, AA53);
AA54 = fmnaOp (tmp, AA64, AA54);
AA55 = fmnaOp (tmp, AA65, AA55);
AA56 = mulOp (negOp(tmp), AA66);
AA57 = fmnaOp (tmp, AA67, AA57);
AA58 = fmnaOp (tmp, AA68, AA58);
AA59 = fmnaOp (tmp, AA69, AA59);
tmp = AA76;
AA70 = fmnaOp (tmp, AA60, AA70);
AA71 = fmnaOp (tmp, AA61, AA71);
AA72 = fmnaOp (tmp, AA62, AA72);
AA73 = fmnaOp (tmp, AA63, AA73);
AA74 = fmnaOp (tmp, AA64, AA74);
AA75 = fmnaOp (tmp, AA65, AA75);
AA76 = mulOp (negOp(tmp), AA66);
AA77 = fmnaOp (tmp, AA67, AA77);
AA78 = fmnaOp (tmp, AA68, AA78);
AA79 = fmnaOp (tmp, AA69, AA79);
tmp = AA86;
AA80 = fmnaOp (tmp, AA60, AA80);
AA81 = fmnaOp (tmp, AA61, AA81);
AA82 = fmnaOp (tmp, AA62, AA82);
AA83 = fmnaOp (tmp, AA63, AA83);
AA84 = fmnaOp (tmp, AA64, AA84);
AA85 = fmnaOp (tmp, AA65, AA85);
AA86 = mulOp (negOp(tmp), AA66);
AA87 = fmnaOp (tmp, AA67, AA87);
AA88 = fmnaOp (tmp, AA68, AA88);
AA89 = fmnaOp (tmp, AA69, AA89);
tmp = AA96;
AA90 = fmnaOp (tmp, AA60, AA90);
AA91 = fmnaOp (tmp, AA61, AA91);
AA92 = fmnaOp (tmp, AA62, AA92);
AA93 = fmnaOp (tmp, AA63, AA93);
AA94 = fmnaOp (tmp, AA64, AA94);
AA95 = fmnaOp (tmp, AA65, AA95);
AA96 = mulOp (negOp(tmp), AA66);
AA97 = fmnaOp (tmp, AA67, AA97);
AA98 = fmnaOp (tmp, AA68, AA98);
AA99 = fmnaOp (tmp, AA69, AA99);
/****************** iteration 7 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA77);
pvt = 7;
t = absOp (AA87);
if (t > p) { p = t; pvt = 8; }
t = absOp (AA97);
if (t > p) { p = t; pvt = 9; }
/* swap pivot row with row 7 */
if (pvt == 8) {
tmp = AA70; AA70 = AA80; AA80 = tmp;
tmp = AA71; AA71 = AA81; AA81 = tmp;
tmp = AA72; AA72 = AA82; AA82 = tmp;
tmp = AA73; AA73 = AA83; AA83 = tmp;
tmp = AA74; AA74 = AA84; AA84 = tmp;
tmp = AA75; AA75 = AA85; AA85 = tmp;
tmp = AA76; AA76 = AA86; AA86 = tmp;
tmp = AA77; AA77 = AA87; AA87 = tmp;
tmp = AA78; AA78 = AA88; AA88 = tmp;
tmp = AA79; AA79 = AA89; AA89 = tmp;
/* update permutation vector based on row swap */
i = perm7; perm7 = perm8; perm8 = i;
}
if (pvt == 9) {
tmp = AA70; AA70 = AA90; AA90 = tmp;
tmp = AA71; AA71 = AA91; AA91 = tmp;
tmp = AA72; AA72 = AA92; AA92 = tmp;
tmp = AA73; AA73 = AA93; AA93 = tmp;
tmp = AA74; AA74 = AA94; AA94 = tmp;
tmp = AA75; AA75 = AA95; AA95 = tmp;
tmp = AA76; AA76 = AA96; AA96 = tmp;
tmp = AA77; AA77 = AA97; AA97 = tmp;
tmp = AA78; AA78 = AA98; AA98 = tmp;
tmp = AA79; AA79 = AA99; AA99 = tmp;
/* update permutation vector based on row swap */
i = perm7; perm7 = perm9; perm9 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA77);
icol7 = perm7;
AA70 = mulOp (tmp, AA70);
AA71 = mulOp (tmp, AA71);
AA72 = mulOp (tmp, AA72);
AA73 = mulOp (tmp, AA73);
AA74 = mulOp (tmp, AA74);
AA75 = mulOp (tmp, AA75);
AA76 = mulOp (tmp, AA76);
AA77 = tmp;
AA78 = mulOp (tmp, AA78);
AA79 = mulOp (tmp, AA79);
/* eliminate above and below current row */
tmp = AA07;
AA00 = fmnaOp (tmp, AA70, AA00);
AA01 = fmnaOp (tmp, AA71, AA01);
AA02 = fmnaOp (tmp, AA72, AA02);
AA03 = fmnaOp (tmp, AA73, AA03);
AA04 = fmnaOp (tmp, AA74, AA04);
AA05 = fmnaOp (tmp, AA75, AA05);
AA06 = fmnaOp (tmp, AA76, AA06);
AA07 = mulOp (negOp(tmp), AA77);
AA08 = fmnaOp (tmp, AA78, AA08);
AA09 = fmnaOp (tmp, AA79, AA09);
tmp = AA17;
AA10 = fmnaOp (tmp, AA70, AA10);
AA11 = fmnaOp (tmp, AA71, AA11);
AA12 = fmnaOp (tmp, AA72, AA12);
AA13 = fmnaOp (tmp, AA73, AA13);
AA14 = fmnaOp (tmp, AA74, AA14);
AA15 = fmnaOp (tmp, AA75, AA15);
AA16 = fmnaOp (tmp, AA76, AA16);
AA17 = mulOp (negOp(tmp), AA77);
AA18 = fmnaOp (tmp, AA78, AA18);
AA19 = fmnaOp (tmp, AA79, AA19);
tmp = AA27;
AA20 = fmnaOp (tmp, AA70, AA20);
AA21 = fmnaOp (tmp, AA71, AA21);
AA22 = fmnaOp (tmp, AA72, AA22);
AA23 = fmnaOp (tmp, AA73, AA23);
AA24 = fmnaOp (tmp, AA74, AA24);
AA25 = fmnaOp (tmp, AA75, AA25);
AA26 = fmnaOp (tmp, AA76, AA26);
AA27 = mulOp (negOp(tmp), AA77);
AA28 = fmnaOp (tmp, AA78, AA28);
AA29 = fmnaOp (tmp, AA79, AA29);
tmp = AA37;
AA30 = fmnaOp (tmp, AA70, AA30);
AA31 = fmnaOp (tmp, AA71, AA31);
AA32 = fmnaOp (tmp, AA72, AA32);
AA33 = fmnaOp (tmp, AA73, AA33);
AA34 = fmnaOp (tmp, AA74, AA34);
AA35 = fmnaOp (tmp, AA75, AA35);
AA36 = fmnaOp (tmp, AA76, AA36);
AA37 = mulOp (negOp(tmp), AA77);
AA38 = fmnaOp (tmp, AA78, AA38);
AA39 = fmnaOp (tmp, AA79, AA39);
tmp = AA47;
AA40 = fmnaOp (tmp, AA70, AA40);
AA41 = fmnaOp (tmp, AA71, AA41);
AA42 = fmnaOp (tmp, AA72, AA42);
AA43 = fmnaOp (tmp, AA73, AA43);
AA44 = fmnaOp (tmp, AA74, AA44);
AA45 = fmnaOp (tmp, AA75, AA45);
AA46 = fmnaOp (tmp, AA76, AA46);
AA47 = mulOp (negOp(tmp), AA77);
AA48 = fmnaOp (tmp, AA78, AA48);
AA49 = fmnaOp (tmp, AA79, AA49);
tmp = AA57;
AA50 = fmnaOp (tmp, AA70, AA50);
AA51 = fmnaOp (tmp, AA71, AA51);
AA52 = fmnaOp (tmp, AA72, AA52);
AA53 = fmnaOp (tmp, AA73, AA53);
AA54 = fmnaOp (tmp, AA74, AA54);
AA55 = fmnaOp (tmp, AA75, AA55);
AA56 = fmnaOp (tmp, AA76, AA56);
AA57 = mulOp (negOp(tmp), AA77);
AA58 = fmnaOp (tmp, AA78, AA58);
AA59 = fmnaOp (tmp, AA79, AA59);
tmp = AA67;
AA60 = fmnaOp (tmp, AA70, AA60);
AA61 = fmnaOp (tmp, AA71, AA61);
AA62 = fmnaOp (tmp, AA72, AA62);
AA63 = fmnaOp (tmp, AA73, AA63);
AA64 = fmnaOp (tmp, AA74, AA64);
AA65 = fmnaOp (tmp, AA75, AA65);
AA66 = fmnaOp (tmp, AA76, AA66);
AA67 = mulOp (negOp(tmp), AA77);
AA68 = fmnaOp (tmp, AA78, AA68);
AA69 = fmnaOp (tmp, AA79, AA69);
tmp = AA87;
AA80 = fmnaOp (tmp, AA70, AA80);
AA81 = fmnaOp (tmp, AA71, AA81);
AA82 = fmnaOp (tmp, AA72, AA82);
AA83 = fmnaOp (tmp, AA73, AA83);
AA84 = fmnaOp (tmp, AA74, AA84);
AA85 = fmnaOp (tmp, AA75, AA85);
AA86 = fmnaOp (tmp, AA76, AA86);
AA87 = mulOp (negOp(tmp), AA77);
AA88 = fmnaOp (tmp, AA78, AA88);
AA89 = fmnaOp (tmp, AA79, AA89);
tmp = AA97;
AA90 = fmnaOp (tmp, AA70, AA90);
AA91 = fmnaOp (tmp, AA71, AA91);
AA92 = fmnaOp (tmp, AA72, AA92);
AA93 = fmnaOp (tmp, AA73, AA93);
AA94 = fmnaOp (tmp, AA74, AA94);
AA95 = fmnaOp (tmp, AA75, AA95);
AA96 = fmnaOp (tmp, AA76, AA96);
AA97 = mulOp (negOp(tmp), AA77);
AA98 = fmnaOp (tmp, AA78, AA98);
AA99 = fmnaOp (tmp, AA79, AA99);
/****************** iteration 8 ****************/
#if USE_PIVOTING
/* search pivot row */
p = absOp (AA88);
pvt = 8;
t = absOp (AA98);
if (t > p) { p = t; pvt = 9; }
/* swap pivot row with row 8 */
if (pvt == 9) {
tmp = AA80; AA80 = AA90; AA90 = tmp;
tmp = AA81; AA81 = AA91; AA91 = tmp;
tmp = AA82; AA82 = AA92; AA92 = tmp;
tmp = AA83; AA83 = AA93; AA93 = tmp;
tmp = AA84; AA84 = AA94; AA94 = tmp;
tmp = AA85; AA85 = AA95; AA95 = tmp;
tmp = AA86; AA86 = AA96; AA96 = tmp;
tmp = AA87; AA87 = AA97; AA97 = tmp;
tmp = AA88; AA88 = AA98; AA98 = tmp;
tmp = AA89; AA89 = AA99; AA99 = tmp;
/* update permutation vector based on row swap */
i = perm8; perm8 = perm9; perm9 = i;
}
#endif // USE_PIVOTING
/* scale current row */
tmp = rcpOp (AA88);
icol8 = perm8;
AA80 = mulOp (tmp, AA80);
AA81 = mulOp (tmp, AA81);
AA82 = mulOp (tmp, AA82);
AA83 = mulOp (tmp, AA83);
AA84 = mulOp (tmp, AA84);
AA85 = mulOp (tmp, AA85);
AA86 = mulOp (tmp, AA86);
AA87 = mulOp (tmp, AA87);
AA88 = tmp;
AA89 = mulOp (tmp, AA89);
/* eliminate above and below current row */
tmp = AA08;
AA00 = fmnaOp (tmp, AA80, AA00);
AA01 = fmnaOp (tmp, AA81, AA01);
AA02 = fmnaOp (tmp, AA82, AA02);
AA03 = fmnaOp (tmp, AA83, AA03);
AA04 = fmnaOp (tmp, AA84, AA04);
AA05 = fmnaOp (tmp, AA85, AA05);
AA06 = fmnaOp (tmp, AA86, AA06);
AA07 = fmnaOp (tmp, AA87, AA07);
AA08 = mulOp (negOp(tmp), AA88);
AA09 = fmnaOp (tmp, AA89, AA09);
tmp = AA18;
AA10 = fmnaOp (tmp, AA80, AA10);
AA11 = fmnaOp (tmp, AA81, AA11);
AA12 = fmnaOp (tmp, AA82, AA12);
AA13 = fmnaOp (tmp, AA83, AA13);
AA14 = fmnaOp (tmp, AA84, AA14);
AA15 = fmnaOp (tmp, AA85, AA15);
AA16 = fmnaOp (tmp, AA86, AA16);
AA17 = fmnaOp (tmp, AA87, AA17);
AA18 = mulOp (negOp(tmp), AA88);
AA19 = fmnaOp (tmp, AA89, AA19);
tmp = AA28;
AA20 = fmnaOp (tmp, AA80, AA20);
AA21 = fmnaOp (tmp, AA81, AA21);
AA22 = fmnaOp (tmp, AA82, AA22);
AA23 = fmnaOp (tmp, AA83, AA23);
AA24 = fmnaOp (tmp, AA84, AA24);
AA25 = fmnaOp (tmp, AA85, AA25);
AA26 = fmnaOp (tmp, AA86, AA26);
AA27 = fmnaOp (tmp, AA87, AA27);
AA28 = mulOp (negOp(tmp), AA88);
AA29 = fmnaOp (tmp, AA89, AA29);
tmp = AA38;
AA30 = fmnaOp (tmp, AA80, AA30);
AA31 = fmnaOp (tmp, AA81, AA31);
AA32 = fmnaOp (tmp, AA82, AA32);
AA33 = fmnaOp (tmp, AA83, AA33);
AA34 = fmnaOp (tmp, AA84, AA34);
AA35 = fmnaOp (tmp, AA85, AA35);
AA36 = fmnaOp (tmp, AA86, AA36);
AA37 = fmnaOp (tmp, AA87, AA37);
AA38 = mulOp (negOp(tmp), AA88);
AA39 = fmnaOp (tmp, AA89, AA39);
tmp = AA48;
AA40 = fmnaOp (tmp, AA80, AA40);
AA41 = fmnaOp (tmp, AA81, AA41);
AA42 = fmnaOp (tmp, AA82, AA42);
AA43 = fmnaOp (tmp, AA83, AA43);
AA44 = fmnaOp (tmp, AA84, AA44);
AA45 = fmnaOp (tmp, AA85, AA45);
AA46 = fmnaOp (tmp, AA86, AA46);
AA47 = fmnaOp (tmp, AA87, AA47);
AA48 = mulOp (negOp(tmp), AA88);
AA49 = fmnaOp (tmp, AA89, AA49);
tmp = AA58;
AA50 = fmnaOp (tmp, AA80, AA50);
AA51 = fmnaOp (tmp, AA81, AA51);
AA52 = fmnaOp (tmp, AA82, AA52);
AA53 = fmnaOp (tmp, AA83, AA53);
AA54 = fmnaOp (tmp, AA84, AA54);
AA55 = fmnaOp (tmp, AA85, AA55);
AA56 = fmnaOp (tmp, AA86, AA56);
AA57 = fmnaOp (tmp, AA87, AA57);
AA58 = mulOp (negOp(tmp), AA88);
AA59 = fmnaOp (tmp, AA89, AA59);
tmp = AA68;
AA60 = fmnaOp (tmp, AA80, AA60);
AA61 = fmnaOp (tmp, AA81, AA61);
AA62 = fmnaOp (tmp, AA82, AA62);
AA63 = fmnaOp (tmp, AA83, AA63);
AA64 = fmnaOp (tmp, AA84, AA64);
AA65 = fmnaOp (tmp, AA85, AA65);
AA66 = fmnaOp (tmp, AA86, AA66);
AA67 = fmnaOp (tmp, AA87, AA67);
AA68 = mulOp (negOp(tmp), AA88);
AA69 = fmnaOp (tmp, AA89, AA69);
tmp = AA78;
AA70 = fmnaOp (tmp, AA80, AA70);
AA71 = fmnaOp (tmp, AA81, AA71);
AA72 = fmnaOp (tmp, AA82, AA72);
AA73 = fmnaOp (tmp, AA83, AA73);
AA74 = fmnaOp (tmp, AA84, AA74);
AA75 = fmnaOp (tmp, AA85, AA75);
AA76 = fmnaOp (tmp, AA86, AA76);
AA77 = fmnaOp (tmp, AA87, AA77);
AA78 = mulOp (negOp(tmp), AA88);
AA79 = fmnaOp (tmp, AA89, AA79);
tmp = AA98;
AA90 = fmnaOp (tmp, AA80, AA90);
AA91 = fmnaOp (tmp, AA81, AA91);
AA92 = fmnaOp (tmp, AA82, AA92);
AA93 = fmnaOp (tmp, AA83, AA93);
AA94 = fmnaOp (tmp, AA84, AA94);
AA95 = fmnaOp (tmp, AA85, AA95);
AA96 = fmnaOp (tmp, AA86, AA96);
AA97 = fmnaOp (tmp, AA87, AA97);
AA98 = mulOp (negOp(tmp), AA88);
AA99 = fmnaOp (tmp, AA89, AA99);
/****************** iteration 9 ****************/
/* scale current row */
tmp = rcpOp (AA99);
icol9 = perm9;
AA90 = mulOp (tmp, AA90);
AA91 = mulOp (tmp, AA91);
AA92 = mulOp (tmp, AA92);
AA93 = mulOp (tmp, AA93);
AA94 = mulOp (tmp, AA94);
AA95 = mulOp (tmp, AA95);
AA96 = mulOp (tmp, AA96);
AA97 = mulOp (tmp, AA97);
AA98 = mulOp (tmp, AA98);
AA99 = tmp;
/* eliminate above and below current row */
tmp = AA09;
AA00 = fmnaOp (tmp, AA90, AA00);
AA01 = fmnaOp (tmp, AA91, AA01);
AA02 = fmnaOp (tmp, AA92, AA02);
AA03 = fmnaOp (tmp, AA93, AA03);
AA04 = fmnaOp (tmp, AA94, AA04);
AA05 = fmnaOp (tmp, AA95, AA05);
AA06 = fmnaOp (tmp, AA96, AA06);
AA07 = fmnaOp (tmp, AA97, AA07);
AA08 = fmnaOp (tmp, AA98, AA08);
AA09 = mulOp (negOp(tmp), AA99);
tmp = AA19;
AA10 = fmnaOp (tmp, AA90, AA10);
AA11 = fmnaOp (tmp, AA91, AA11);
AA12 = fmnaOp (tmp, AA92, AA12);
AA13 = fmnaOp (tmp, AA93, AA13);
AA14 = fmnaOp (tmp, AA94, AA14);
AA15 = fmnaOp (tmp, AA95, AA15);
AA16 = fmnaOp (tmp, AA96, AA16);
AA17 = fmnaOp (tmp, AA97, AA17);
AA18 = fmnaOp (tmp, AA98, AA18);
AA19 = mulOp (negOp(tmp), AA99);
tmp = AA29;
AA20 = fmnaOp (tmp, AA90, AA20);
AA21 = fmnaOp (tmp, AA91, AA21);
AA22 = fmnaOp (tmp, AA92, AA22);
AA23 = fmnaOp (tmp, AA93, AA23);
AA24 = fmnaOp (tmp, AA94, AA24);
AA25 = fmnaOp (tmp, AA95, AA25);
AA26 = fmnaOp (tmp, AA96, AA26);
AA27 = fmnaOp (tmp, AA97, AA27);
AA28 = fmnaOp (tmp, AA98, AA28);
AA29 = mulOp (negOp(tmp), AA99);
tmp = AA39;
AA30 = fmnaOp (tmp, AA90, AA30);
AA31 = fmnaOp (tmp, AA91, AA31);
AA32 = fmnaOp (tmp, AA92, AA32);
AA33 = fmnaOp (tmp, AA93, AA33);
AA34 = fmnaOp (tmp, AA94, AA34);
AA35 = fmnaOp (tmp, AA95, AA35);
AA36 = fmnaOp (tmp, AA96, AA36);
AA37 = fmnaOp (tmp, AA97, AA37);
AA38 = fmnaOp (tmp, AA98, AA38);
AA39 = mulOp (negOp(tmp), AA99);
tmp = AA49;
AA40 = fmnaOp (tmp, AA90, AA40);
AA41 = fmnaOp (tmp, AA91, AA41);
AA42 = fmnaOp (tmp, AA92, AA42);
AA43 = fmnaOp (tmp, AA93, AA43);
AA44 = fmnaOp (tmp, AA94, AA44);
AA45 = fmnaOp (tmp, AA95, AA45);
AA46 = fmnaOp (tmp, AA96, AA46);
AA47 = fmnaOp (tmp, AA97, AA47);
AA48 = fmnaOp (tmp, AA98, AA48);
AA49 = mulOp (negOp(tmp), AA99);
tmp = AA59;
AA50 = fmnaOp (tmp, AA90, AA50);
AA51 = fmnaOp (tmp, AA91, AA51);
AA52 = fmnaOp (tmp, AA92, AA52);
AA53 = fmnaOp (tmp, AA93, AA53);
AA54 = fmnaOp (tmp, AA94, AA54);
AA55 = fmnaOp (tmp, AA95, AA55);
AA56 = fmnaOp (tmp, AA96, AA56);
AA57 = fmnaOp (tmp, AA97, AA57);
AA58 = fmnaOp (tmp, AA98, AA58);
AA59 = mulOp (negOp(tmp), AA99);
tmp = AA69;
AA60 = fmnaOp (tmp, AA90, AA60);
AA61 = fmnaOp (tmp, AA91, AA61);
AA62 = fmnaOp (tmp, AA92, AA62);
AA63 = fmnaOp (tmp, AA93, AA63);
AA64 = fmnaOp (tmp, AA94, AA64);
AA65 = fmnaOp (tmp, AA95, AA65);
AA66 = fmnaOp (tmp, AA96, AA66);
AA67 = fmnaOp (tmp, AA97, AA67);
AA68 = fmnaOp (tmp, AA98, AA68);
AA69 = mulOp (negOp(tmp), AA99);
tmp = AA79;
AA70 = fmnaOp (tmp, AA90, AA70);
AA71 = fmnaOp (tmp, AA91, AA71);
AA72 = fmnaOp (tmp, AA92, AA72);
AA73 = fmnaOp (tmp, AA93, AA73);
AA74 = fmnaOp (tmp, AA94, AA74);
AA75 = fmnaOp (tmp, AA95, AA75);
AA76 = fmnaOp (tmp, AA96, AA76);
AA77 = fmnaOp (tmp, AA97, AA77);
AA78 = fmnaOp (tmp, AA98, AA78);
AA79 = mulOp (negOp(tmp), AA99);
tmp = AA89;
AA80 = fmnaOp (tmp, AA90, AA80);
AA81 = fmnaOp (tmp, AA91, AA81);
AA82 = fmnaOp (tmp, AA92, AA82);
AA83 = fmnaOp (tmp, AA93, AA83);
AA84 = fmnaOp (tmp, AA94, AA84);
AA85 = fmnaOp (tmp, AA95, AA85);
AA86 = fmnaOp (tmp, AA96, AA86);
AA87 = fmnaOp (tmp, AA97, AA87);
AA88 = fmnaOp (tmp, AA98, AA88);
AA89 = mulOp (negOp(tmp), AA99);
/* sort columns into the correct order */
Ainv(0,icol0) = AA00;
Ainv(1,icol0) = AA10;
Ainv(2,icol0) = AA20;
Ainv(3,icol0) = AA30;
Ainv(4,icol0) = AA40;
Ainv(5,icol0) = AA50;
Ainv(6,icol0) = AA60;
Ainv(7,icol0) = AA70;
Ainv(8,icol0) = AA80;
Ainv(9,icol0) = AA90;
Ainv(0,icol1) = AA01;
Ainv(1,icol1) = AA11;
Ainv(2,icol1) = AA21;
Ainv(3,icol1) = AA31;
Ainv(4,icol1) = AA41;
Ainv(5,icol1) = AA51;
Ainv(6,icol1) = AA61;
Ainv(7,icol1) = AA71;
Ainv(8,icol1) = AA81;
Ainv(9,icol1) = AA91;
Ainv(0,icol2) = AA02;
Ainv(1,icol2) = AA12;
Ainv(2,icol2) = AA22;
Ainv(3,icol2) = AA32;
Ainv(4,icol2) = AA42;
Ainv(5,icol2) = AA52;
Ainv(6,icol2) = AA62;
Ainv(7,icol2) = AA72;
Ainv(8,icol2) = AA82;
Ainv(9,icol2) = AA92;
Ainv(0,icol3) = AA03;
Ainv(1,icol3) = AA13;
Ainv(2,icol3) = AA23;
Ainv(3,icol3) = AA33;
Ainv(4,icol3) = AA43;
Ainv(5,icol3) = AA53;
Ainv(6,icol3) = AA63;
Ainv(7,icol3) = AA73;
Ainv(8,icol3) = AA83;
Ainv(9,icol3) = AA93;
Ainv(0,icol4) = AA04;
Ainv(1,icol4) = AA14;
Ainv(2,icol4) = AA24;
Ainv(3,icol4) = AA34;
Ainv(4,icol4) = AA44;
Ainv(5,icol4) = AA54;
Ainv(6,icol4) = AA64;
Ainv(7,icol4) = AA74;
Ainv(8,icol4) = AA84;
Ainv(9,icol4) = AA94;
Ainv(0,icol5) = AA05;
Ainv(1,icol5) = AA15;
Ainv(2,icol5) = AA25;
Ainv(3,icol5) = AA35;
Ainv(4,icol5) = AA45;
Ainv(5,icol5) = AA55;
Ainv(6,icol5) = AA65;
Ainv(7,icol5) = AA75;
Ainv(8,icol5) = AA85;
Ainv(9,icol5) = AA95;
Ainv(0,icol6) = AA06;
Ainv(1,icol6) = AA16;
Ainv(2,icol6) = AA26;
Ainv(3,icol6) = AA36;
Ainv(4,icol6) = AA46;
Ainv(5,icol6) = AA56;
Ainv(6,icol6) = AA66;
Ainv(7,icol6) = AA76;
Ainv(8,icol6) = AA86;
Ainv(9,icol6) = AA96;
Ainv(0,icol7) = AA07;
Ainv(1,icol7) = AA17;
Ainv(2,icol7) = AA27;
Ainv(3,icol7) = AA37;
Ainv(4,icol7) = AA47;
Ainv(5,icol7) = AA57;
Ainv(6,icol7) = AA67;
Ainv(7,icol7) = AA77;
Ainv(8,icol7) = AA87;
Ainv(9,icol7) = AA97;
Ainv(0,icol8) = AA08;
Ainv(1,icol8) = AA18;
Ainv(2,icol8) = AA28;
Ainv(3,icol8) = AA38;
Ainv(4,icol8) = AA48;
Ainv(5,icol8) = AA58;
Ainv(6,icol8) = AA68;
Ainv(7,icol8) = AA78;
Ainv(8,icol8) = AA88;
Ainv(9,icol8) = AA98;
Ainv(0,icol9) = AA09;
Ainv(1,icol9) = AA19;
Ainv(2,icol9) = AA29;
Ainv(3,icol9) = AA39;
Ainv(4,icol9) = AA49;
Ainv(5,icol9) = AA59;
Ainv(6,icol9) = AA69;
Ainv(7,icol9) = AA79;
Ainv(8,icol9) = AA89;
Ainv(9,icol9) = AA99;
}
} /* if (!isDoubleComplex<T>()) */
}
extern __shared__ double2 shmem[];
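/*
 * Dynamic shared memory layout for matinv_gje3 (mirrored by the smem_size
 * computation in the host dispatcher below): the working matrix As of
 * (N+pad)*N elements, then pivot_thrds partial pivot magnitudes (Val),
 * pivot_thrds partial pivot row indices (Loc), N column indices (icol),
 * and the N-entry row permutation vector (perm).
 */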
template<typename T, int pad, int pivot_thrds, int arch>
__global__ void
__launch_bounds__ (config<T,arch>::gje3MaxThrds, config<T,arch>::gje3MinBlks)
matinv_gje3 (const T *A, T *Ainv, int N, int batch)
{
T *As = (T*)shmem;
typename config<T,arch>::absValType *Val =
(typename config<T,arch>::absValType *)(As + (N+pad) * N);
int *Loc = (int*)(Val + pivot_thrds);
int *icol = (int*)(Loc + pivot_thrds);
int *perm = (int*)(icol + N);
T diagRcp;
const int ofs = pad;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blkNum = blockIdx.y * gridDim.x + blockIdx.x;
if (blkNum >= batch) return;
A += blkNum * N * N;
Ainv += blkNum * N * N;
/* Load matrix into shared memory */
for (int i = tx; i < N; i += blockDim.x) {
As(i,ty) = A[ty * N + i];
}
/* initialize row permutation vector */
if (tx == 0) perm[ty] = ty;
int j = 0;
do {
/* Look for pivot */
__syncthreads();
if ((tx == 0) && (ty < pivot_thrds)) {
typename config<T,arch>::absValType val0 = absOp (As(j,j));
int loc0 = j;
int i = j + 1 + ty;
T *dp = &As(i,j);
const int incr = &As(pivot_thrds,0)-&As(0,0);
while (i < N) {
typename config<T,arch>::absValType vali = absOp (*dp);
if (val0 < vali) {
val0 = vali;
loc0 = i;
}
dp += incr;
i += pivot_thrds;
}
Loc[ty] = loc0;
if (pivot_thrds > 1) Val[ty] = val0;
}
/* Swap current row with pivot */
__syncthreads();
if (tx == 0) {
T tmp;
int it;
int Pl = Loc[0];
if (pivot_thrds > 1) {
typename config<T,arch>::absValType val = Val[0];
int i = 1;
for (; i < (pivot_thrds-1); i++) {
if (Val[i] > val) {
Pl = Loc[i];
val = Val[i];
}
}
if (Val[i] > val) {
Pl = Loc[i];
}
}
tmp = As(Pl,ty);
As(Pl,ty) = As(j,ty);
As(j,ty) = tmp;
/* update permutation vector based on row swap */
if (ty == j) {
it = perm[Pl];
perm[Pl] = perm[j];
perm[j] = it;
}
}
/* scale current row, except current column */
__syncthreads();
diagRcp = rcpOp (As(j,j));
if ((tx == 0) && !(ty == j)) {
As(j,ty) = mulOp (As(j,ty), diagRcp);
}
/* update above and below current row, except current column */
__syncthreads();
for (int i = tx; i < N; i += blockDim.x) {
if ((i != j) && !(ty == j)) {
As(i,ty) = fmnaOp (As(i,j), As(j,ty), As(i,ty));
}
}
/* update current column, and column permutation vector */
__syncthreads();
if (tx == 0) {
As(ty,j) = (ty == j) ? diagRcp : negOp (mulOp (As(ty,j), diagRcp));
if (ty == j) {
icol[j] = perm[j];
}
}
j++;
} while (j < N);
__syncthreads();
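    /* write back the result: column ty of As is the column of the inverse
       that belongs at original column icol[ty], undoing the pivoting */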
for (int i = tx; i < N; i += blockDim.x) {
Ainv[icol[ty] * N + i] = As(i,ty);
}
}
template <typename T, int arch>
int matinv_gje3 (const T *A_d, T *Ainv_d, int n, int batch)
{
typedef void (* func)(const T *A_d, T *Ainv_d, int n, int batch);
static int padding[110] = {
config<T,arch>::gje3Pad_00, config<T,arch>::gje3Pad_01,
config<T,arch>::gje3Pad_02, config<T,arch>::gje3Pad_03,
config<T,arch>::gje3Pad_04, config<T,arch>::gje3Pad_05,
config<T,arch>::gje3Pad_06, config<T,arch>::gje3Pad_07,
config<T,arch>::gje3Pad_08, config<T,arch>::gje3Pad_09,
config<T,arch>::gje3Pad_10, config<T,arch>::gje3Pad_11,
config<T,arch>::gje3Pad_12, config<T,arch>::gje3Pad_13,
config<T,arch>::gje3Pad_14, config<T,arch>::gje3Pad_15,
config<T,arch>::gje3Pad_16, config<T,arch>::gje3Pad_17,
config<T,arch>::gje3Pad_18, config<T,arch>::gje3Pad_19,
config<T,arch>::gje3Pad_20, config<T,arch>::gje3Pad_21,
config<T,arch>::gje3Pad_22, config<T,arch>::gje3Pad_23,
config<T,arch>::gje3Pad_24, config<T,arch>::gje3Pad_25,
config<T,arch>::gje3Pad_26, config<T,arch>::gje3Pad_27,
config<T,arch>::gje3Pad_28, config<T,arch>::gje3Pad_29,
config<T,arch>::gje3Pad_30, config<T,arch>::gje3Pad_31,
config<T,arch>::gje3Pad_32, config<T,arch>::gje3Pad_33,
config<T,arch>::gje3Pad_34, config<T,arch>::gje3Pad_35,
config<T,arch>::gje3Pad_36, config<T,arch>::gje3Pad_37,
config<T,arch>::gje3Pad_38, config<T,arch>::gje3Pad_39,
config<T,arch>::gje3Pad_40, config<T,arch>::gje3Pad_41,
config<T,arch>::gje3Pad_42, config<T,arch>::gje3Pad_43,
config<T,arch>::gje3Pad_44, config<T,arch>::gje3Pad_45,
config<T,arch>::gje3Pad_46, config<T,arch>::gje3Pad_47,
config<T,arch>::gje3Pad_48, config<T,arch>::gje3Pad_49,
config<T,arch>::gje3Pad_50, config<T,arch>::gje3Pad_51,
config<T,arch>::gje3Pad_52, config<T,arch>::gje3Pad_53,
config<T,arch>::gje3Pad_54, config<T,arch>::gje3Pad_55,
config<T,arch>::gje3Pad_56, config<T,arch>::gje3Pad_57,
config<T,arch>::gje3Pad_58, config<T,arch>::gje3Pad_59,
config<T,arch>::gje3Pad_60, config<T,arch>::gje3Pad_61,
config<T,arch>::gje3Pad_62, config<T,arch>::gje3Pad_63,
config<T,arch>::gje3Pad_64, config<T,arch>::gje3Pad_65,
config<T,arch>::gje3Pad_66, config<T,arch>::gje3Pad_67,
config<T,arch>::gje3Pad_68, config<T,arch>::gje3Pad_69,
config<T,arch>::gje3Pad_70, config<T,arch>::gje3Pad_71,
config<T,arch>::gje3Pad_72, config<T,arch>::gje3Pad_73,
config<T,arch>::gje3Pad_74, config<T,arch>::gje3Pad_75,
config<T,arch>::gje3Pad_76, config<T,arch>::gje3Pad_77,
config<T,arch>::gje3Pad_78, config<T,arch>::gje3Pad_79,
config<T,arch>::gje3Pad_80, config<T,arch>::gje3Pad_81,
config<T,arch>::gje3Pad_82, config<T,arch>::gje3Pad_83,
config<T,arch>::gje3Pad_84, config<T,arch>::gje3Pad_85,
config<T,arch>::gje3Pad_86, config<T,arch>::gje3Pad_87,
config<T,arch>::gje3Pad_88, config<T,arch>::gje3Pad_89,
config<T,arch>::gje3Pad_90, config<T,arch>::gje3Pad_91,
config<T,arch>::gje3Pad_92, config<T,arch>::gje3Pad_93,
config<T,arch>::gje3Pad_94, config<T,arch>::gje3Pad_95,
config<T,arch>::gje3Pad_96, config<T,arch>::gje3Pad_97,
config<T,arch>::gje3Pad_98, config<T,arch>::gje3Pad_99,
config<T,arch>::gje3Pad_100,config<T,arch>::gje3Pad_101,
config<T,arch>::gje3Pad_102,config<T,arch>::gje3Pad_103,
config<T,arch>::gje3Pad_104,config<T,arch>::gje3Pad_105,
config<T,arch>::gje3Pad_106,config<T,arch>::gje3Pad_107,
config<T,arch>::gje3Pad_108,config<T,arch>::gje3Pad_109
};
static int dimX[110] = {
config<T,arch>::gje3DimX_00, config<T,arch>::gje3DimX_01,
config<T,arch>::gje3DimX_02, config<T,arch>::gje3DimX_03,
config<T,arch>::gje3DimX_04, config<T,arch>::gje3DimX_05,
config<T,arch>::gje3DimX_06, config<T,arch>::gje3DimX_07,
config<T,arch>::gje3DimX_08, config<T,arch>::gje3DimX_09,
config<T,arch>::gje3DimX_10, config<T,arch>::gje3DimX_11,
config<T,arch>::gje3DimX_12, config<T,arch>::gje3DimX_13,
config<T,arch>::gje3DimX_14, config<T,arch>::gje3DimX_15,
config<T,arch>::gje3DimX_16, config<T,arch>::gje3DimX_17,
config<T,arch>::gje3DimX_18, config<T,arch>::gje3DimX_19,
config<T,arch>::gje3DimX_20, config<T,arch>::gje3DimX_21,
config<T,arch>::gje3DimX_22, config<T,arch>::gje3DimX_23,
config<T,arch>::gje3DimX_24, config<T,arch>::gje3DimX_25,
config<T,arch>::gje3DimX_26, config<T,arch>::gje3DimX_27,
config<T,arch>::gje3DimX_28, config<T,arch>::gje3DimX_29,
config<T,arch>::gje3DimX_30, config<T,arch>::gje3DimX_31,
config<T,arch>::gje3DimX_32, config<T,arch>::gje3DimX_33,
config<T,arch>::gje3DimX_34, config<T,arch>::gje3DimX_35,
config<T,arch>::gje3DimX_36, config<T,arch>::gje3DimX_37,
config<T,arch>::gje3DimX_38, config<T,arch>::gje3DimX_39,
config<T,arch>::gje3DimX_40, config<T,arch>::gje3DimX_41,
config<T,arch>::gje3DimX_42, config<T,arch>::gje3DimX_43,
config<T,arch>::gje3DimX_44, config<T,arch>::gje3DimX_45,
config<T,arch>::gje3DimX_46, config<T,arch>::gje3DimX_47,
config<T,arch>::gje3DimX_48, config<T,arch>::gje3DimX_49,
config<T,arch>::gje3DimX_50, config<T,arch>::gje3DimX_51,
config<T,arch>::gje3DimX_52, config<T,arch>::gje3DimX_53,
config<T,arch>::gje3DimX_54, config<T,arch>::gje3DimX_55,
config<T,arch>::gje3DimX_56, config<T,arch>::gje3DimX_57,
config<T,arch>::gje3DimX_58, config<T,arch>::gje3DimX_59,
config<T,arch>::gje3DimX_60, config<T,arch>::gje3DimX_61,
config<T,arch>::gje3DimX_62, config<T,arch>::gje3DimX_63,
config<T,arch>::gje3DimX_64, config<T,arch>::gje3DimX_65,
config<T,arch>::gje3DimX_66, config<T,arch>::gje3DimX_67,
config<T,arch>::gje3DimX_68, config<T,arch>::gje3DimX_69,
config<T,arch>::gje3DimX_70, config<T,arch>::gje3DimX_71,
config<T,arch>::gje3DimX_72, config<T,arch>::gje3DimX_73,
config<T,arch>::gje3DimX_74, config<T,arch>::gje3DimX_75,
config<T,arch>::gje3DimX_76, config<T,arch>::gje3DimX_77,
config<T,arch>::gje3DimX_78, config<T,arch>::gje3DimX_79,
config<T,arch>::gje3DimX_80, config<T,arch>::gje3DimX_81,
config<T,arch>::gje3DimX_82, config<T,arch>::gje3DimX_83,
config<T,arch>::gje3DimX_84, config<T,arch>::gje3DimX_85,
config<T,arch>::gje3DimX_86, config<T,arch>::gje3DimX_87,
config<T,arch>::gje3DimX_88, config<T,arch>::gje3DimX_89,
config<T,arch>::gje3DimX_90, config<T,arch>::gje3DimX_91,
config<T,arch>::gje3DimX_92, config<T,arch>::gje3DimX_93,
config<T,arch>::gje3DimX_94, config<T,arch>::gje3DimX_95,
config<T,arch>::gje3DimX_96, config<T,arch>::gje3DimX_97,
config<T,arch>::gje3DimX_98, config<T,arch>::gje3DimX_99,
config<T,arch>::gje3DimX_100,config<T,arch>::gje3DimX_101,
config<T,arch>::gje3DimX_102,config<T,arch>::gje3DimX_103,
config<T,arch>::gje3DimX_104,config<T,arch>::gje3DimX_105,
config<T,arch>::gje3DimX_106,config<T,arch>::gje3DimX_107,
config<T,arch>::gje3DimX_108,config<T,arch>::gje3DimX_109
};
static int srchThrd[110] = {
config<T,arch>::gje3SrchThrd_00, config<T,arch>::gje3SrchThrd_01,
config<T,arch>::gje3SrchThrd_02, config<T,arch>::gje3SrchThrd_03,
config<T,arch>::gje3SrchThrd_04, config<T,arch>::gje3SrchThrd_05,
config<T,arch>::gje3SrchThrd_06, config<T,arch>::gje3SrchThrd_07,
config<T,arch>::gje3SrchThrd_08, config<T,arch>::gje3SrchThrd_09,
config<T,arch>::gje3SrchThrd_10, config<T,arch>::gje3SrchThrd_11,
config<T,arch>::gje3SrchThrd_12, config<T,arch>::gje3SrchThrd_13,
config<T,arch>::gje3SrchThrd_14, config<T,arch>::gje3SrchThrd_15,
config<T,arch>::gje3SrchThrd_16, config<T,arch>::gje3SrchThrd_17,
config<T,arch>::gje3SrchThrd_18, config<T,arch>::gje3SrchThrd_19,
config<T,arch>::gje3SrchThrd_20, config<T,arch>::gje3SrchThrd_21,
config<T,arch>::gje3SrchThrd_22, config<T,arch>::gje3SrchThrd_23,
config<T,arch>::gje3SrchThrd_24, config<T,arch>::gje3SrchThrd_25,
config<T,arch>::gje3SrchThrd_26, config<T,arch>::gje3SrchThrd_27,
config<T,arch>::gje3SrchThrd_28, config<T,arch>::gje3SrchThrd_29,
config<T,arch>::gje3SrchThrd_30, config<T,arch>::gje3SrchThrd_31,
config<T,arch>::gje3SrchThrd_32, config<T,arch>::gje3SrchThrd_33,
config<T,arch>::gje3SrchThrd_34, config<T,arch>::gje3SrchThrd_35,
config<T,arch>::gje3SrchThrd_36, config<T,arch>::gje3SrchThrd_37,
config<T,arch>::gje3SrchThrd_38, config<T,arch>::gje3SrchThrd_39,
config<T,arch>::gje3SrchThrd_40, config<T,arch>::gje3SrchThrd_41,
config<T,arch>::gje3SrchThrd_42, config<T,arch>::gje3SrchThrd_43,
config<T,arch>::gje3SrchThrd_44, config<T,arch>::gje3SrchThrd_45,
config<T,arch>::gje3SrchThrd_46, config<T,arch>::gje3SrchThrd_47,
config<T,arch>::gje3SrchThrd_48, config<T,arch>::gje3SrchThrd_49,
config<T,arch>::gje3SrchThrd_50, config<T,arch>::gje3SrchThrd_51,
config<T,arch>::gje3SrchThrd_52, config<T,arch>::gje3SrchThrd_53,
config<T,arch>::gje3SrchThrd_54, config<T,arch>::gje3SrchThrd_55,
config<T,arch>::gje3SrchThrd_56, config<T,arch>::gje3SrchThrd_57,
config<T,arch>::gje3SrchThrd_58, config<T,arch>::gje3SrchThrd_59,
config<T,arch>::gje3SrchThrd_60, config<T,arch>::gje3SrchThrd_61,
config<T,arch>::gje3SrchThrd_62, config<T,arch>::gje3SrchThrd_63,
config<T,arch>::gje3SrchThrd_64, config<T,arch>::gje3SrchThrd_65,
config<T,arch>::gje3SrchThrd_66, config<T,arch>::gje3SrchThrd_67,
config<T,arch>::gje3SrchThrd_68, config<T,arch>::gje3SrchThrd_69,
config<T,arch>::gje3SrchThrd_70, config<T,arch>::gje3SrchThrd_71,
config<T,arch>::gje3SrchThrd_72, config<T,arch>::gje3SrchThrd_73,
config<T,arch>::gje3SrchThrd_74, config<T,arch>::gje3SrchThrd_75,
config<T,arch>::gje3SrchThrd_76, config<T,arch>::gje3SrchThrd_77,
config<T,arch>::gje3SrchThrd_78, config<T,arch>::gje3SrchThrd_79,
config<T,arch>::gje3SrchThrd_80, config<T,arch>::gje3SrchThrd_81,
config<T,arch>::gje3SrchThrd_82, config<T,arch>::gje3SrchThrd_83,
config<T,arch>::gje3SrchThrd_84, config<T,arch>::gje3SrchThrd_85,
config<T,arch>::gje3SrchThrd_86, config<T,arch>::gje3SrchThrd_87,
config<T,arch>::gje3SrchThrd_88, config<T,arch>::gje3SrchThrd_89,
config<T,arch>::gje3SrchThrd_90, config<T,arch>::gje3SrchThrd_91,
config<T,arch>::gje3SrchThrd_92, config<T,arch>::gje3SrchThrd_93,
config<T,arch>::gje3SrchThrd_94, config<T,arch>::gje3SrchThrd_95,
config<T,arch>::gje3SrchThrd_96, config<T,arch>::gje3SrchThrd_97,
config<T,arch>::gje3SrchThrd_98, config<T,arch>::gje3SrchThrd_99,
config<T,arch>::gje3SrchThrd_100,config<T,arch>::gje3SrchThrd_101,
config<T,arch>::gje3SrchThrd_102,config<T,arch>::gje3SrchThrd_103,
config<T,arch>::gje3SrchThrd_104,config<T,arch>::gje3SrchThrd_105,
config<T,arch>::gje3SrchThrd_106,config<T,arch>::gje3SrchThrd_107,
config<T,arch>::gje3SrchThrd_108,config<T,arch>::gje3SrchThrd_109
};
func pf[110] = {
0,
0,
matinv_gje3<T, config<T,arch>::gje3Pad_02, config<T,arch>::gje3SrchThrd_02, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_03, config<T,arch>::gje3SrchThrd_03, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_04, config<T,arch>::gje3SrchThrd_04, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_05, config<T,arch>::gje3SrchThrd_05, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_06, config<T,arch>::gje3SrchThrd_06, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_07, config<T,arch>::gje3SrchThrd_07, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_08, config<T,arch>::gje3SrchThrd_08, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_09, config<T,arch>::gje3SrchThrd_09, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_10, config<T,arch>::gje3SrchThrd_10, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_11, config<T,arch>::gje3SrchThrd_11, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_12, config<T,arch>::gje3SrchThrd_12, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_13, config<T,arch>::gje3SrchThrd_13, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_14, config<T,arch>::gje3SrchThrd_14, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_15, config<T,arch>::gje3SrchThrd_15, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_16, config<T,arch>::gje3SrchThrd_16, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_17, config<T,arch>::gje3SrchThrd_17, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_18, config<T,arch>::gje3SrchThrd_18, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_19, config<T,arch>::gje3SrchThrd_19, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_20, config<T,arch>::gje3SrchThrd_20, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_21, config<T,arch>::gje3SrchThrd_21, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_22, config<T,arch>::gje3SrchThrd_22, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_23, config<T,arch>::gje3SrchThrd_23, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_24, config<T,arch>::gje3SrchThrd_24, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_25, config<T,arch>::gje3SrchThrd_25, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_26, config<T,arch>::gje3SrchThrd_26, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_27, config<T,arch>::gje3SrchThrd_27, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_28, config<T,arch>::gje3SrchThrd_28, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_29, config<T,arch>::gje3SrchThrd_29, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_30, config<T,arch>::gje3SrchThrd_30, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_31, config<T,arch>::gje3SrchThrd_31, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_32, config<T,arch>::gje3SrchThrd_32, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_33, config<T,arch>::gje3SrchThrd_33, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_34, config<T,arch>::gje3SrchThrd_34, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_35, config<T,arch>::gje3SrchThrd_35, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_36, config<T,arch>::gje3SrchThrd_36, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_37, config<T,arch>::gje3SrchThrd_37, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_38, config<T,arch>::gje3SrchThrd_38, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_39, config<T,arch>::gje3SrchThrd_39, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_40, config<T,arch>::gje3SrchThrd_40, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_41, config<T,arch>::gje3SrchThrd_41, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_42, config<T,arch>::gje3SrchThrd_42, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_43, config<T,arch>::gje3SrchThrd_43, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_44, config<T,arch>::gje3SrchThrd_44, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_45, config<T,arch>::gje3SrchThrd_45, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_46, config<T,arch>::gje3SrchThrd_46, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_47, config<T,arch>::gje3SrchThrd_47, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_48, config<T,arch>::gje3SrchThrd_48, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_49, config<T,arch>::gje3SrchThrd_49, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_50, config<T,arch>::gje3SrchThrd_50, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_51, config<T,arch>::gje3SrchThrd_51, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_52, config<T,arch>::gje3SrchThrd_52, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_53, config<T,arch>::gje3SrchThrd_53, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_54, config<T,arch>::gje3SrchThrd_54, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_55, config<T,arch>::gje3SrchThrd_55, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_56, config<T,arch>::gje3SrchThrd_56, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_57, config<T,arch>::gje3SrchThrd_57, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_58, config<T,arch>::gje3SrchThrd_58, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_59, config<T,arch>::gje3SrchThrd_59, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_60, config<T,arch>::gje3SrchThrd_60, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_61, config<T,arch>::gje3SrchThrd_61, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_62, config<T,arch>::gje3SrchThrd_62, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_63, config<T,arch>::gje3SrchThrd_63, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_64, config<T,arch>::gje3SrchThrd_64, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_65, config<T,arch>::gje3SrchThrd_65, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_66, config<T,arch>::gje3SrchThrd_66, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_67, config<T,arch>::gje3SrchThrd_67, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_68, config<T,arch>::gje3SrchThrd_68, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_69, config<T,arch>::gje3SrchThrd_69, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_70, config<T,arch>::gje3SrchThrd_70, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_71, config<T,arch>::gje3SrchThrd_71, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_72, config<T,arch>::gje3SrchThrd_72, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_73, config<T,arch>::gje3SrchThrd_73, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_74, config<T,arch>::gje3SrchThrd_74, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_75, config<T,arch>::gje3SrchThrd_75, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_76, config<T,arch>::gje3SrchThrd_76, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_77, config<T,arch>::gje3SrchThrd_77, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_78, config<T,arch>::gje3SrchThrd_78, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_79, config<T,arch>::gje3SrchThrd_79, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_80, config<T,arch>::gje3SrchThrd_80, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_81, config<T,arch>::gje3SrchThrd_81, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_82, config<T,arch>::gje3SrchThrd_82, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_83, config<T,arch>::gje3SrchThrd_83, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_84, config<T,arch>::gje3SrchThrd_84, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_85, config<T,arch>::gje3SrchThrd_85, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_86, config<T,arch>::gje3SrchThrd_86, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_87, config<T,arch>::gje3SrchThrd_87, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_88, config<T,arch>::gje3SrchThrd_88, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_89, config<T,arch>::gje3SrchThrd_89, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_90, config<T,arch>::gje3SrchThrd_90, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_91, config<T,arch>::gje3SrchThrd_91, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_92, config<T,arch>::gje3SrchThrd_92, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_93, config<T,arch>::gje3SrchThrd_93, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_94, config<T,arch>::gje3SrchThrd_94, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_95, config<T,arch>::gje3SrchThrd_95, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_96, config<T,arch>::gje3SrchThrd_96, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_97, config<T,arch>::gje3SrchThrd_97, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_98, config<T,arch>::gje3SrchThrd_98, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_99, config<T,arch>::gje3SrchThrd_99, arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_100,config<T,arch>::gje3SrchThrd_100,arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_101,config<T,arch>::gje3SrchThrd_101,arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_102,config<T,arch>::gje3SrchThrd_102,arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_103,config<T,arch>::gje3SrchThrd_103,arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_104,config<T,arch>::gje3SrchThrd_104,arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_105,config<T,arch>::gje3SrchThrd_105,arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_106,config<T,arch>::gje3SrchThrd_106,arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_107,config<T,arch>::gje3SrchThrd_107,arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_108,config<T,arch>::gje3SrchThrd_108,arch>,
matinv_gje3<T, config<T,arch>::gje3Pad_109,config<T,arch>::gje3SrchThrd_109,arch>,
};
if (n < config<T,arch>::gje3MinDim || n > config<T,arch>::gje3MaxDim ||
batch < 1) {
return -1;
}
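    /* one thread block per matrix: blockDim.y = n (thread ty owns column ty),
       while the blockDim.x = dimX[n] threads stride over the rows of that column */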
dim3 dimBlock(dimX[n], n);
dim3 dimGrid;
if (batch <= GRID_DIM_LIMIT) {
dimGrid.x = batch;
dimGrid.y = 1;
dimGrid.z = 1;
} else {
dimGrid.x = GRID_DIM_LIMIT;
dimGrid.y = (batch + GRID_DIM_LIMIT-1) / GRID_DIM_LIMIT;
dimGrid.z = 1;
}
int smem_size = (sizeof(A_d[0]) * (n + padding[n]) * (n) + // As
sizeof(typename config<T,arch>::absValType) * srchThrd[n] + // Val
sizeof(int) * srchThrd[n] + // Loc
sizeof(int) * n + // icol
sizeof(int) * n); // perm
pf[n]<<<dimGrid,dimBlock,smem_size>>>(A_d,Ainv_d,n,batch);
/* Check synchronous errors, i.e. pre-launch */
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
return -2;
}
return 0;
}
template <typename T, int arch>
int matinv_MatPerThread (const T *A_d, T *Ainv_d, int n, int batch)
{
typedef void (* func)(const T *A_d, T *Ainv_d, int batch);
int minBatchSize [11] = {
0x7fffffff,
0x7fffffff,
config<T,arch>::matInv2x2MinBatch,
config<T,arch>::matInv3x3MinBatch,
config<T,arch>::matInv4x4MinBatch,
config<T,arch>::matInv5x5MinBatch,
config<T,arch>::matInv6x6MinBatch,
config<T,arch>::matInv7x7MinBatch,
config<T,arch>::matInv8x8MinBatch,
config<T,arch>::matInv9x9MinBatch,
config<T,arch>::matInv10x10MinBatch
};
func pf[11] = {
0,
0,
matinv_2x2_matrix_per_thread<T,arch>,
matinv_3x3_matrix_per_thread<T,arch>,
matinv_4x4_matrix_per_thread<T,arch>,
matinv_5x5_matrix_per_thread<T,arch>,
matinv_6x6_matrix_per_thread<T,arch>,
matinv_7x7_matrix_per_thread<T,arch>,
matinv_8x8_matrix_per_thread<T,arch>,
matinv_9x9_matrix_per_thread<T,arch>,
matinv_10x10_matrix_per_thread<T,arch>
};
cudaError_t err;
dim3 dimBlock(128);
dim3 dimGrid;
int numBlocks;
if (n < config<T,arch>::matInvMinDim || batch < 1) {
return -1;
}
if (n > config<T,arch>::matInvMaxDim || batch < minBatchSize[n]) {
return 1;
}
switch (n) {
case 4:
err = cudaFuncSetCacheConfig (matinv_4x4_matrix_per_thread<T,arch>,
cudaFuncCachePreferL1);
break;
case 5:
err = cudaFuncSetCacheConfig (matinv_5x5_matrix_per_thread<T,arch>,
cudaFuncCachePreferL1);
break;
case 6:
err = cudaFuncSetCacheConfig (matinv_6x6_matrix_per_thread<T,arch>,
cudaFuncCachePreferL1);
break;
case 7:
err = cudaFuncSetCacheConfig (matinv_7x7_matrix_per_thread<T,arch>,
cudaFuncCachePreferL1);
break;
case 8:
err = cudaFuncSetCacheConfig (matinv_8x8_matrix_per_thread<T,arch>,
cudaFuncCachePreferL1);
break;
case 9:
err = cudaFuncSetCacheConfig (matinv_9x9_matrix_per_thread<T,arch>,
cudaFuncCachePreferL1);
break;
case 10:
err = cudaFuncSetCacheConfig (matinv_10x10_matrix_per_thread<T,arch>,
cudaFuncCachePreferL1);
break;
default:
err = cudaSuccess;
break;
}
if (err != cudaSuccess) {
return -2;
}
numBlocks = (batch + dimBlock.x - 1) / dimBlock.x;
if (numBlocks <= GRID_DIM_LIMIT) {
dimGrid.x = numBlocks;
dimGrid.y = 1;
dimGrid.z = 1;
} else {
dimGrid.x = GRID_DIM_LIMIT;
dimGrid.y = (numBlocks + GRID_DIM_LIMIT-1) / GRID_DIM_LIMIT;
dimGrid.z = 1;
}
pf[n]<<<dimGrid,dimBlock>>>(A_d,Ainv_d,batch);
/* Check synchronous errors, i.e. pre-launch */
err = cudaGetLastError();
if (cudaSuccess != err) {
return -2;
}
return 0;
}
/* C callable wrapper functions */
int smatinv_batch (float *A, float *Ainv, int n, int batch)
{
int stat;
stat = matinv_MatPerThread<float,GPU_ARCH>(A, Ainv, n, batch);
if (stat <= 0) return stat;
return matinv_gje3<float,GPU_ARCH>(A, Ainv, n, batch);
}
int dmatinv_batch (double *A, double *Ainv, int n, int batch)
{
int stat;
stat = matinv_MatPerThread<double,GPU_ARCH>(A, Ainv, n, batch);
if (stat <= 0) return stat;
return matinv_gje3<double,GPU_ARCH>(A, Ainv, n, batch);
}
int cmatinv_batch (cuComplex *A, cuComplex *Ainv, int n, int batch)
{
int stat;
stat = matinv_MatPerThread<cuComplex,GPU_ARCH>(A, Ainv, n, batch);
if (stat <= 0) return stat;
return matinv_gje3<cuComplex,GPU_ARCH>(A, Ainv, n, batch);
}
int zmatinv_batch (cuDoubleComplex *A, cuDoubleComplex *Ainv, int n, int batch)
{
int stat;
stat = matinv_MatPerThread<cuDoubleComplex,GPU_ARCH>(A, Ainv, n, batch);
if (stat <= 0) return stat;
return matinv_gje3<cuDoubleComplex,GPU_ARCH>(A, Ainv, n, batch);
}
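/*
 * Usage sketch (illustrative only; the buffer names, batch size, and the fill
 * step are assumptions, not part of this file): invert a batch of 128 double
 * precision 5x5 matrices stored back to back, one n*n matrix after another.
 *
 *   double *A_d, *Ainv_d;
 *   cudaMalloc((void**)&A_d, 128 * 5 * 5 * sizeof(double));
 *   cudaMalloc((void**)&Ainv_d, 128 * 5 * 5 * sizeof(double));
 *   // ... copy the 128 input matrices into A_d ...
 *   int stat = dmatinv_batch(A_d, Ainv_d, 5, 128);
 *   // stat == 0: success; stat < 0: invalid arguments or launch failure
 */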
|
f2cea905a7d3af9479692ed56cb07be45b0ec82c.hip | // !!! This is a file automatically generated by hipify!!!
// Equihash CUDA solver
// Copyright (c) 2016 John Tromp
#define XINTREE
#define UNROLL
#define htole32(x) (x)
#define HAVE_DECL_HTOLE32 1
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "device_atomic_functions.h"
#include <functional>
#include <vector>
#include "equi.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "blake2b.cu"
typedef uint16_t u16;
typedef uint64_t u64;
#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) {
if (code != hipSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
#ifndef RESTBITS
#define RESTBITS 4
#endif
// 2_log of number of buckets
#define BUCKBITS (DIGITBITS-RESTBITS)
#ifndef SAVEMEM
#if RESTBITS == 4
// can't save memory in such small buckets
#define SAVEMEM 1
#elif RESTBITS >= 8
// take advantage of law of large numbers (sum of 2^8 random numbers)
// this reduces (200,9) memory to under 144MB, with negligible discarding
#define SAVEMEM 9/14
#endif
#endif
// number of buckets
static const u32 NBUCKETS = 1<<BUCKBITS;
// 2_log of number of slots per bucket
static const u32 SLOTBITS = RESTBITS+1+1;
static const u32 SLOTRANGE = 1<<SLOTBITS;
// number of slots per bucket
static const u32 NSLOTS = SLOTRANGE * SAVEMEM;
// SLOTBITS mask
static const u32 SLOTMASK = SLOTRANGE-1;
// number of possible values of xhash (rest of n) bits
static const u32 NRESTS = 1<<RESTBITS;
// RESTBITS mask
static const u32 RESTMASK = NRESTS-1;
// number of blocks of hashes extracted from single 512 bit blake2b output
static const u32 NBLOCKS = (NHASHES+HASHESPERBLAKE-1)/HASHESPERBLAKE;
// nothing larger found in 100000 runs
static const u32 MAXSOLS = 8;
// tree node identifying its children as two different slots in
// a bucket on previous layer with the same rest bits (x-tra hash)
struct tree {
u32 bid_s0_s1_x; // manual bitfields
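  // packing, from high to low bits:
  //   with XINTREE:    bucketid | slotid0 (SLOTBITS) | slotid1 (SLOTBITS) | xhash (RESTBITS)
  //   without XINTREE: bucketid | slotid0 (SLOTBITS) | slotid1 (SLOTBITS)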
__device__ tree(const u32 idx, const u32 xh) {
bid_s0_s1_x = idx << RESTBITS | xh;
}
__device__ tree(const u32 idx) {
bid_s0_s1_x = idx;
}
#ifdef XINTREE
__device__ tree(const u32 bid, const u32 s0, const u32 s1, const u32 xh) {
bid_s0_s1_x = ((((bid << SLOTBITS) | s0) << SLOTBITS) | s1) << RESTBITS | xh;
#else
__device__ tree(const u32 bid, const u32 s0, const u32 s1) {
bid_s0_s1_x = (((bid << SLOTBITS) | s0) << SLOTBITS) | s1;
#endif
}
__device__ u32 getindex() const {
#ifdef XINTREE
return bid_s0_s1_x >> RESTBITS;
#else
return bid_s0_s1_x;
#endif
}
__device__ u32 bucketid() const {
#ifdef XINTREE
return bid_s0_s1_x >> (2 * SLOTBITS + RESTBITS);
#else
return bid_s0_s1_x >> (2 * SLOTBITS);
#endif
}
__device__ u32 slotid0() const {
#ifdef XINTREE
return (bid_s0_s1_x >> SLOTBITS+RESTBITS) & SLOTMASK;
#else
return (bid_s0_s1_x >> SLOTBITS) & SLOTMASK;
#endif
}
__device__ u32 slotid1() const {
#ifdef XINTREE
return (bid_s0_s1_x >> RESTBITS) & SLOTMASK;
#else
return bid_s0_s1_x & SLOTMASK;
#endif
}
__device__ u32 xhash() const {
return bid_s0_s1_x & RESTMASK;
}
};
union hashunit {
u32 word;
uchar bytes[sizeof(u32)];
};
#define WORDS(bits) ((bits + 31) / 32)
#define HASHWORDS0 WORDS(WN - DIGITBITS + RESTBITS)
#define HASHWORDS1 WORDS(WN - 2*DIGITBITS + RESTBITS)
struct slot0 {
tree attr;
hashunit hash[HASHWORDS0];
};
struct slot1 {
tree attr;
hashunit hash[HASHWORDS1];
};
// a bucket is NSLOTS treenodes
typedef slot0 bucket0[NSLOTS];
typedef slot1 bucket1[NSLOTS];
// the N-bit hash consists of K+1 n-bit "digits"
// each of which corresponds to a layer of NBUCKETS buckets
typedef bucket0 digit0[NBUCKETS];
typedef bucket1 digit1[NBUCKETS];
// size (in bytes) of hash in round 0 <= r < WK
u32 hhashsize(const u32 r) {
#ifdef XINTREE
const u32 hashbits = WN - (r+1) * DIGITBITS;
#else
const u32 hashbits = WN - (r+1) * DIGITBITS + RESTBITS;
#endif
return (hashbits + 7) / 8;
}
// size (in bytes) of hash in round 0 <= r < WK
__device__ u32 hashsize(const u32 r) {
#ifdef XINTREE
const u32 hashbits = WN - (r+1) * DIGITBITS;
#else
const u32 hashbits = WN - (r+1) * DIGITBITS + RESTBITS;
#endif
return (hashbits + 7) / 8;
}
u32 hhashwords(u32 bytes) {
return (bytes + 3) / 4;
}
__device__ u32 hashwords(u32 bytes) {
return (bytes + 3) / 4;
}
// manages hash and tree data
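// trees0[i] holds the buckets written by round 2*i (rounds 0,2,4,...) and
// trees1[i] those written by round 2*i+1, so successive rounds ping-pong
// between the two tables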
struct htalloc {
bucket0 *trees0[(WK+1)/2];
bucket1 *trees1[WK/2];
};
typedef u32 bsizes[NBUCKETS];
struct equi {
blake2b_state blake_ctx;
htalloc hta;
bsizes *nslots;
proof *sols;
u32 nsols;
u32 nthreads;
equi(const u32 n_threads) {
nthreads = n_threads;
}
void setheadernonce(const char *header, const u32 hlen, const char *nonce, const u32 nlen) {
setheader(&blake_ctx, header, hlen, nonce, nlen);
checkCudaErrors(hipMemset(nslots, 0, NBUCKETS * sizeof(u32)));
nsols = 0;
}
__device__ u32 getnslots0(const u32 bid) {
u32 &nslot = nslots[0][bid];
const u32 n = min(nslot, NSLOTS);
nslot = 0;
return n;
}
__device__ u32 getnslots1(const u32 bid) {
u32 &nslot = nslots[1][bid];
const u32 n = min(nslot, NSLOTS);
nslot = 0;
return n;
}
__device__ void orderindices(u32 *indices, u32 size) {
if (indices[0] > indices[size]) {
for (u32 i=0; i < size; i++) {
const u32 tmp = indices[i];
indices[i] = indices[size+i];
indices[size+i] = tmp;
}
}
}
__device__ void listindices1(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[0][t.bucketid()];
const u32 size = 1 << 0;
indices[0] = buck[t.slotid0()].attr.getindex();
indices[size] = buck[t.slotid1()].attr.getindex();
orderindices(indices, size);
}
__device__ void listindices2(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[0][t.bucketid()];
const u32 size = 1 << 1;
listindices1(buck[t.slotid0()].attr, indices);
listindices1(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices3(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[1][t.bucketid()];
const u32 size = 1 << 2;
listindices2(buck[t.slotid0()].attr, indices);
listindices2(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices4(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[1][t.bucketid()];
const u32 size = 1 << 3;
listindices3(buck[t.slotid0()].attr, indices);
listindices3(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices5(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[2][t.bucketid()];
const u32 size = 1 << 4;
listindices4(buck[t.slotid0()].attr, indices);
listindices4(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
#if WK == 9
__device__ void listindices6(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[2][t.bucketid()];
const u32 size = 1 << 5;
listindices5(buck[t.slotid0()].attr, indices);
listindices5(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices7(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[3][t.bucketid()];
const u32 size = 1 << 6;
listindices6(buck[t.slotid0()].attr, indices);
listindices6(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices8(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[3][t.bucketid()];
const u32 size = 1 << 7;
listindices7(buck[t.slotid0()].attr, indices);
listindices7(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices9(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[4][t.bucketid()];
const u32 size = 1 << 8;
listindices8(buck[t.slotid0()].attr, indices);
listindices8(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
#endif
__device__ void candidate(const tree t) {
proof prf;
#if WK==9
listindices9(t, prf);
#elif WK==5
listindices5(t, prf);
#else
#error not implemented
#endif
if (probdupe(prf))
return;
u32 soli = atomicAdd(&nsols, 1);
if (soli < MAXSOLS)
#if WK==9
listindices9(t, sols[soli]);
#elif WK==5
listindices5(t, sols[soli]);
#else
#error not implemented
#endif
}
void showbsizes(u32 r) {
#if defined(HIST) || defined(SPARK) || defined(LOGSPARK)
u32 ns[NBUCKETS];
checkCudaErrors(hipMemcpy(ns, nslots[r&1], NBUCKETS * sizeof(u32), hipMemcpyDeviceToHost));
u32 binsizes[65];
memset(binsizes, 0, 65 * sizeof(u32));
for (u32 bucketid = 0; bucketid < NBUCKETS; bucketid++) {
u32 bsize = min(ns[bucketid], NSLOTS) >> (SLOTBITS-6);
binsizes[bsize]++;
}
for (u32 i=0; i < 65; i++) {
#ifdef HIST
printf(" %d:%d", i, binsizes[i]);
#else
#ifdef SPARK
u32 sparks = binsizes[i] / SPARKSCALE;
#else
u32 sparks = 0;
for (u32 bs = binsizes[i]; bs; bs >>= 1) sparks++;
sparks = sparks * 7 / SPARKSCALE;
#endif
printf("\342\226%c", '\201' + sparks);
#endif
}
printf("\n");
#endif
}
// proper dupe test is a little costly on GPU, so allow false negatives
__device__ bool probdupe(u32 *prf) {
unsigned short susp[PROOFSIZE];
memset(susp, 0xffff, PROOFSIZE * sizeof(unsigned short));
for (u32 i=0; i<PROOFSIZE; i++) {
u32 bin = prf[i] & (PROOFSIZE-1);
unsigned short msb = prf[i]>>WK;
if (msb == susp[bin])
return true;
susp[bin] = msb;
}
return false;
}
struct htlayout {
htalloc hta;
u32 prevhashunits;
u32 nexthashunits;
u32 dunits;
u32 prevbo;
u32 nextbo;
__device__ htlayout(equi *eq, u32 r): hta(eq->hta), prevhashunits(0), dunits(0) {
u32 nexthashbytes = hashsize(r);
nexthashunits = hashwords(nexthashbytes);
prevbo = 0;
nextbo = nexthashunits * sizeof(hashunit) - nexthashbytes; // 0-3
if (r) {
u32 prevhashbytes = hashsize(r-1);
prevhashunits = hashwords(prevhashbytes);
prevbo = prevhashunits * sizeof(hashunit) - prevhashbytes; // 0-3
dunits = prevhashunits - nexthashunits;
}
}
__device__ u32 getxhash0(const slot0* pslot) const {
#ifdef XINTREE
return pslot->attr.xhash();
#elif WN == 200 && RESTBITS == 4
return pslot->hash->bytes[prevbo] >> 4;
#elif WN == 200 && RESTBITS == 8
return (pslot->hash->bytes[prevbo] & 0xf) << 4 | pslot->hash->bytes[prevbo+1] >> 4;
#elif WN == 144 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif WN == 200 && RESTBITS == 6
return (pslot->hash->bytes[prevbo] & 0x3) << 4 | pslot->hash->bytes[prevbo+1] >> 4;
#else
#error not implemented
#endif
}
__device__ u32 getxhash1(const slot1* pslot) const {
#ifdef XINTREE
return pslot->attr.xhash();
#elif WN == 200 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif WN == 200 && RESTBITS == 8
return pslot->hash->bytes[prevbo];
#elif WN == 144 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif WN == 200 && RESTBITS == 6
return pslot->hash->bytes[prevbo] &0x3f;
#else
#error not implemented
#endif
}
__device__ bool equal(const hashunit *hash0, const hashunit *hash1) const {
return hash0[prevhashunits-1].word == hash1[prevhashunits-1].word;
}
};
struct collisiondata {
#ifdef XBITMAP
#if NSLOTS > 64
#error cant use XBITMAP with more than 64 slots
#endif
u64 xhashmap[NRESTS];
u64 xmap;
#else
#if RESTBITS <= 6
typedef uchar xslot;
#else
typedef u16 xslot;
#endif
static const xslot xnil = ~0;
xslot xhashslots[NRESTS];
xslot nextxhashslot[NSLOTS];
xslot nextslot;
#endif
u32 s0;
__device__ void clear() {
#ifdef XBITMAP
memset(xhashmap, 0, NRESTS * sizeof(u64));
#else
memset(xhashslots, xnil, NRESTS * sizeof(xslot));
memset(nextxhashslot, xnil, NSLOTS * sizeof(xslot));
#endif
}
__device__ bool addslot(u32 s1, u32 xh) {
#ifdef XBITMAP
xmap = xhashmap[xh];
xhashmap[xh] |= (u64)1 << s1;
s0 = ~0;
return true;
#else
nextslot = xhashslots[xh];
nextxhashslot[s1] = nextslot;
xhashslots[xh] = s1;
return true;
#endif
}
__device__ bool nextcollision() const {
#ifdef XBITMAP
return xmap != 0;
#else
return nextslot != xnil;
#endif
}
__device__ u32 slot() {
#ifdef XBITMAP
const u32 ffs = __ffsll(xmap);
s0 += ffs; xmap >>= ffs;
#else
nextslot = nextxhashslot[s0 = nextslot];
#endif
return s0;
}
};
};
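// round 0: each thread hashes a strided range of blake2b blocks; every block
// yields HASHESPERBLAKE hashes, which are scattered into buckets by their
// leading BUCKBITS bits (with XINTREE the next RESTBITS are kept in the tree node)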
__global__ void digitH(equi *eq) {
uchar hash[HASHOUT];
blake2b_state state;
equi::htlayout htl(eq, 0);
const u32 hashbytes = hashsize(0);
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 block = id; block < NBLOCKS; block += eq->nthreads) {
state = eq->blake_ctx;
blake2b_gpu_hash(&state, block, hash, HASHOUT);
for (u32 i = 0; i<HASHESPERBLAKE; i++) {
const uchar *ph = hash + i * WN/8;
#if BUCKBITS == 16 && RESTBITS == 4
const u32 bucketid = ((u32)ph[0] << 8) | ph[1];
#ifdef XINTREE
const u32 xhash = ph[2] >> 4;
#endif
#elif BUCKBITS == 14 && RESTBITS == 6
const u32 bucketid = ((u32)ph[0] << 6) | ph[1] >> 2;
#elif BUCKBITS == 12 && RESTBITS == 8
const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
#elif BUCKBITS == 20 && RESTBITS == 4
const u32 bucketid = ((((u32)ph[0] << 8) | ph[1]) << 4) | ph[2] >> 4;
#ifdef XINTREE
const u32 xhash = ph[2] & 0xf;
#endif
#elif BUCKBITS == 12 && RESTBITS == 4
const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
const u32 xhash = ph[1] & 0xf;
#else
#error not implemented
#endif
const u32 slot = atomicAdd(&eq->nslots[0][bucketid], 1);
if (slot >= NSLOTS)
continue;
slot0 &s = eq->hta.trees0[0][bucketid][slot];
#ifdef XINTREE
s.attr = tree(block*HASHESPERBLAKE+i, xhash);
#else
s.attr = tree(block*HASHESPERBLAKE+i);
#endif
memcpy(s.hash->bytes+htl.nextbo, ph+WN/8-hashbytes, hashbytes);
}
}
}
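// generic odd round r = 1,3,...: within each bucket of trees0[(r-1)/2], pair up
// slots that collide on the RESTBITS rest bits, XOR their hashes, and store the
// result in trees1[r/2] under the bucket given by the next BUCKBITS of the XOR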
__global__ void digitO(equi *eq, const u32 r) {
equi::htlayout htl(eq, r);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[(r-1)/2][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
u32 xorbucketid;
u32 xhash;
const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
#if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8)
| (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1])) << 4
| (xhash = bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4;
xhash &= 0xf;
#elif WN == 144 && BUCKBITS == 20 && RESTBITS == 4
xorbucketid = ((((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 8)
| (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2])) << 4)
| (xhash = bytes0[htl.prevbo+3] ^ bytes1[htl.prevbo+3]) >> 4;
xhash &= 0xf;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
xorbucketid = ((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 4)
| (xhash = bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4;
xhash &= 0xf;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
xorbucketid = ((((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) & 0xf) << 8)
| (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2])) << 2
| (bytes0[htl.prevbo+3] ^ bytes1[htl.prevbo+3]) >> 6;
#else
#error not implemented
#endif
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[r/2][xorbucketid][xorslot];
#ifdef XINTREE
xs.attr = tree(bucketid, s0, s1, xhash);
#else
xs.attr = tree(bucketid, s0, s1);
#endif
for (u32 i=htl.dunits; i < htl.prevhashunits; i++)
xs.hash[i-htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
}
}
}
}
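// generic even round r = 2,4,...: same as digitO with the slot tables swapped,
// reading trees1[(r-1)/2] and writing trees0[r/2]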
__global__ void digitE(equi *eq, const u32 r) {
equi::htlayout htl(eq, r);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[(r-1)/2][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
u32 xorbucketid;
const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
#if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8)
| (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]);
u32 xhash = (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4;
#elif WN == 144 && BUCKBITS == 20 && RESTBITS == 4
xorbucketid = ((((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 8)
| (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2])) << 4)
| (bytes0[htl.prevbo+3] ^ bytes1[htl.prevbo+3]) >> 4;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
xorbucketid = ((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 4)
| (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
xorbucketid = ((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 6)
| (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 2;
#else
#error not implemented
#endif
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[r/2][xorbucketid][xorslot];
#ifdef XINTREE
xs.attr = tree(bucketid, s0, s1, xhash);
#else
xs.attr = tree(bucketid, s0, s1);
#endif
for (u32 i=htl.dunits; i < htl.prevhashunits; i++)
xs.hash[i-htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
}
}
}
}
#ifdef UNROLL
// bucket mask
static const u32 BUCKMASK = NBUCKETS-1;
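// digit_1 .. digit8 below are unrolled specializations of digitO/digitE for the
// (200,9) parameters with XINTREE (BUCKBITS 16, RESTBITS 4); they use __byte_perm
// to pick the bucket id and rest bits out of the XORed hash words and copy only
// the hash words still needed by later rounds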
__global__ void digit_1(equi *eq) {
equi::htlayout htl(eq, 1);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[0][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x0123);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[0][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
xs.hash[4].word = pslot0->hash[5].word ^ pslot1->hash[5].word;
}
}
}
}
__global__ void digit2(equi *eq) {
equi::htlayout htl(eq, 2);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[0][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x0123);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[1][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[4].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
}
}
}
}
__global__ void digit3(equi *eq) {
equi::htlayout htl(eq, 3);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[1][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x1234);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[1][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
}
}
}
}
__global__ void digit4(equi *eq) {
equi::htlayout htl(eq, 4);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[1][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x4123);
const u32 xorbucketid = bexor >> 8;
const u32 xhash = bexor >> 4 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[2][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
}
}
}
}
__global__ void digit5(equi *eq) {
equi::htlayout htl(eq, 5);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[2][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[2][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
}
}
}
}
__global__ void digit6(equi *eq) {
equi::htlayout htl(eq, 6);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[2][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[3][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
}
}
}
}
__global__ void digit7(equi *eq) {
equi::htlayout htl(eq, 7);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[3][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x4012);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[3][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
}
}
}
}
__global__ void digit8(equi *eq) {
equi::htlayout htl(eq, 8);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[3][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x3456);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[4][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
}
}
}
}
#endif
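// final round WK: any two slots in a bucket whose remaining hash words match
// form a candidate solution; candidate() expands the tree into its PROOFSIZE
// leaf indices and discards probable duplicates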
__global__ void digitK(equi *eq) {
equi::collisiondata cd;
equi::htlayout htl(eq, WK);
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[(WK-1)/2][bucketid];
u32 bsize = eq->getnslots0(bucketid); // assume WK odd
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1))) // assume WK odd
continue;
for (; cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash)) {
#ifdef XINTREE
eq->candidate(tree(bucketid, s0, s1, 0));
#else
eq->candidate(tree(bucketid, s0, s1));
#endif
}
}
}
}
}
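// host driver: launches one kernel per round; with UNROLL and the (200,9)
// XINTREE layout the specialized digit kernels are used, otherwise the generic
// digitO/digitE pair, and the solutions are finally copied back to the host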
void runEquihash(int nthreads, int tpb, equi* eq, equi* device_eq, proof* sols) {
printf("Digit 0\n");
hipLaunchKernelGGL(( digitH), dim3(nthreads / tpb), dim3(tpb), 0, 0, device_eq);
#if BUCKBITS == 16 && RESTBITS == 4 && defined XINTREE && defined(UNROLL)
printf("Digit %d\n", 1);
hipLaunchKernelGGL(( digit_1), dim3(nthreads / tpb), dim3(tpb), 0, 0, device_eq);
printf("Digit %d\n", 2);
hipLaunchKernelGGL(( digit2), dim3(nthreads / tpb), dim3(tpb), 0, 0, device_eq);
printf("Digit %d\n", 3);
hipLaunchKernelGGL(( digit3), dim3(nthreads / tpb), dim3(tpb), 0, 0, device_eq);
printf("Digit %d\n", 4);
hipLaunchKernelGGL(( digit4), dim3(nthreads / tpb), dim3(tpb), 0, 0, device_eq);
printf("Digit %d\n", 5);
hipLaunchKernelGGL(( digit5), dim3(nthreads / tpb), dim3(tpb), 0, 0, device_eq);
printf("Digit %d\n", 6);
hipLaunchKernelGGL(( digit6), dim3(nthreads / tpb), dim3(tpb), 0, 0, device_eq);
printf("Digit %d\n", 7);
hipLaunchKernelGGL(( digit7), dim3(nthreads / tpb), dim3(tpb), 0, 0, device_eq);
printf("Digit %d\n", 8);
hipLaunchKernelGGL(( digit8), dim3(nthreads / tpb), dim3(tpb), 0, 0, device_eq);
#else
for (u32 r = 1; r < WK; r++) {
printf("Digit %d\n", r);
r & 1 ?hipLaunchKernelGGL(( digitO), dim3(nthreads / tpb), dim3(tpb), 0, 0, device_eq, r)
:hipLaunchKernelGGL(( digitE), dim3(nthreads / tpb), dim3(tpb), 0, 0, device_eq, r);
eq->showbsizes(r);
}
#endif
printf("Digit %d\n", WK);
hipLaunchKernelGGL(( digitK), dim3(nthreads / tpb), dim3(tpb), 0, 0, device_eq);
checkCudaErrors(hipMemcpy(eq, device_eq, sizeof(equi), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(sols, eq->sols, MAXSOLS * sizeof(proof), hipMemcpyDeviceToHost));
} | f2cea905a7d3af9479692ed56cb07be45b0ec82c.cu | // Equihash CUDA solver
// Copyright (c) 2016 John Tromp
#define XINTREE
#define UNROLL
#define htole32(x) (x)
#define HAVE_DECL_HTOLE32 1
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_atomic_functions.h"
#include <functional>
#include <vector>
#include "equi.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "blake2b.cu"
typedef uint16_t u16;
typedef uint64_t u64;
#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) {
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
#ifndef RESTBITS
#define RESTBITS 4
#endif
// 2_log of number of buckets
#define BUCKBITS (DIGITBITS-RESTBITS)
#ifndef SAVEMEM
#if RESTBITS == 4
// can't save memory in such small buckets
#define SAVEMEM 1
#elif RESTBITS >= 8
// take advantage of law of large numbers (sum of 2^8 random numbers)
// this reduces (200,9) memory to under 144MB, with negligible discarding
#define SAVEMEM 9/14
#endif
#endif
// number of buckets
static const u32 NBUCKETS = 1<<BUCKBITS;
// 2_log of number of slots per bucket
static const u32 SLOTBITS = RESTBITS+1+1;
static const u32 SLOTRANGE = 1<<SLOTBITS;
// number of slots per bucket
static const u32 NSLOTS = SLOTRANGE * SAVEMEM;
// SLOTBITS mask
static const u32 SLOTMASK = SLOTRANGE-1;
// number of possible values of xhash (rest of n) bits
static const u32 NRESTS = 1<<RESTBITS;
// RESTBITS mask
static const u32 RESTMASK = NRESTS-1;
// number of blocks of hashes extracted from single 512 bit blake2b output
static const u32 NBLOCKS = (NHASHES+HASHESPERBLAKE-1)/HASHESPERBLAKE;
// nothing larger found in 100000 runs
static const u32 MAXSOLS = 8;
// tree node identifying its children as two different slots in
// a bucket on previous layer with the same rest bits (x-tra hash)
struct tree {
u32 bid_s0_s1_x; // manual bitfields
__device__ tree(const u32 idx, const u32 xh) {
bid_s0_s1_x = idx << RESTBITS | xh;
}
__device__ tree(const u32 idx) {
bid_s0_s1_x = idx;
}
#ifdef XINTREE
__device__ tree(const u32 bid, const u32 s0, const u32 s1, const u32 xh) {
bid_s0_s1_x = ((((bid << SLOTBITS) | s0) << SLOTBITS) | s1) << RESTBITS | xh;
#else
__device__ tree(const u32 bid, const u32 s0, const u32 s1) {
bid_s0_s1_x = (((bid << SLOTBITS) | s0) << SLOTBITS) | s1;
#endif
}
__device__ u32 getindex() const {
#ifdef XINTREE
return bid_s0_s1_x >> RESTBITS;
#else
return bid_s0_s1_x;
#endif
}
__device__ u32 bucketid() const {
#ifdef XINTREE
return bid_s0_s1_x >> (2 * SLOTBITS + RESTBITS);
#else
return bid_s0_s1_x >> (2 * SLOTBITS);
#endif
}
__device__ u32 slotid0() const {
#ifdef XINTREE
return (bid_s0_s1_x >> SLOTBITS+RESTBITS) & SLOTMASK;
#else
return (bid_s0_s1_x >> SLOTBITS) & SLOTMASK;
#endif
}
__device__ u32 slotid1() const {
#ifdef XINTREE
return (bid_s0_s1_x >> RESTBITS) & SLOTMASK;
#else
return bid_s0_s1_x & SLOTMASK;
#endif
}
__device__ u32 xhash() const {
return bid_s0_s1_x & RESTMASK;
}
};
union hashunit {
u32 word;
uchar bytes[sizeof(u32)];
};
#define WORDS(bits) ((bits + 31) / 32)
#define HASHWORDS0 WORDS(WN - DIGITBITS + RESTBITS)
#define HASHWORDS1 WORDS(WN - 2*DIGITBITS + RESTBITS)
struct slot0 {
tree attr;
hashunit hash[HASHWORDS0];
};
struct slot1 {
tree attr;
hashunit hash[HASHWORDS1];
};
// a bucket is NSLOTS treenodes
typedef slot0 bucket0[NSLOTS];
typedef slot1 bucket1[NSLOTS];
// the N-bit hash consists of K+1 n-bit "digits"
// each of which corresponds to a layer of NBUCKETS buckets
typedef bucket0 digit0[NBUCKETS];
typedef bucket1 digit1[NBUCKETS];
// size (in bytes) of hash in round 0 <= r < WK
u32 hhashsize(const u32 r) {
#ifdef XINTREE
const u32 hashbits = WN - (r+1) * DIGITBITS;
#else
const u32 hashbits = WN - (r+1) * DIGITBITS + RESTBITS;
#endif
return (hashbits + 7) / 8;
}
// size (in bytes) of hash in round 0 <= r < WK
__device__ u32 hashsize(const u32 r) {
#ifdef XINTREE
const u32 hashbits = WN - (r+1) * DIGITBITS;
#else
const u32 hashbits = WN - (r+1) * DIGITBITS + RESTBITS;
#endif
return (hashbits + 7) / 8;
}
u32 hhashwords(u32 bytes) {
return (bytes + 3) / 4;
}
__device__ u32 hashwords(u32 bytes) {
return (bytes + 3) / 4;
}
// manages hash and tree data
struct htalloc {
bucket0 *trees0[(WK+1)/2];
bucket1 *trees1[WK/2];
};
typedef u32 bsizes[NBUCKETS];
struct equi {
blake2b_state blake_ctx;
htalloc hta;
bsizes *nslots;
proof *sols;
u32 nsols;
u32 nthreads;
equi(const u32 n_threads) {
nthreads = n_threads;
}
void setheadernonce(const char *header, const u32 hlen, const char *nonce, const u32 nlen) {
setheader(&blake_ctx, header, hlen, nonce, nlen);
checkCudaErrors(cudaMemset(nslots, 0, NBUCKETS * sizeof(u32)));
nsols = 0;
}
__device__ u32 getnslots0(const u32 bid) {
u32 &nslot = nslots[0][bid];
const u32 n = min(nslot, NSLOTS);
nslot = 0;
return n;
}
__device__ u32 getnslots1(const u32 bid) {
u32 &nslot = nslots[1][bid];
const u32 n = min(nslot, NSLOTS);
nslot = 0;
return n;
}
__device__ void orderindices(u32 *indices, u32 size) {
if (indices[0] > indices[size]) {
for (u32 i=0; i < size; i++) {
const u32 tmp = indices[i];
indices[i] = indices[size+i];
indices[size+i] = tmp;
}
}
}
__device__ void listindices1(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[0][t.bucketid()];
const u32 size = 1 << 0;
indices[0] = buck[t.slotid0()].attr.getindex();
indices[size] = buck[t.slotid1()].attr.getindex();
orderindices(indices, size);
}
__device__ void listindices2(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[0][t.bucketid()];
const u32 size = 1 << 1;
listindices1(buck[t.slotid0()].attr, indices);
listindices1(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices3(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[1][t.bucketid()];
const u32 size = 1 << 2;
listindices2(buck[t.slotid0()].attr, indices);
listindices2(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices4(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[1][t.bucketid()];
const u32 size = 1 << 3;
listindices3(buck[t.slotid0()].attr, indices);
listindices3(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices5(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[2][t.bucketid()];
const u32 size = 1 << 4;
listindices4(buck[t.slotid0()].attr, indices);
listindices4(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
#if WK == 9
__device__ void listindices6(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[2][t.bucketid()];
const u32 size = 1 << 5;
listindices5(buck[t.slotid0()].attr, indices);
listindices5(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices7(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[3][t.bucketid()];
const u32 size = 1 << 6;
listindices6(buck[t.slotid0()].attr, indices);
listindices6(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices8(const tree t, u32 *indices) {
const bucket1 &buck = hta.trees1[3][t.bucketid()];
const u32 size = 1 << 7;
listindices7(buck[t.slotid0()].attr, indices);
listindices7(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
__device__ void listindices9(const tree t, u32 *indices) {
const bucket0 &buck = hta.trees0[4][t.bucketid()];
const u32 size = 1 << 8;
listindices8(buck[t.slotid0()].attr, indices);
listindices8(buck[t.slotid1()].attr, indices+size);
orderindices(indices, size);
}
#endif
__device__ void candidate(const tree t) {
proof prf;
#if WK==9
listindices9(t, prf);
#elif WK==5
listindices5(t, prf);
#else
#error not implemented
#endif
if (probdupe(prf))
return;
u32 soli = atomicAdd(&nsols, 1);
if (soli < MAXSOLS)
#if WK==9
listindices9(t, sols[soli]);
#elif WK==5
listindices5(t, sols[soli]);
#else
#error not implemented
#endif
}
void showbsizes(u32 r) {
#if defined(HIST) || defined(SPARK) || defined(LOGSPARK)
u32 ns[NBUCKETS];
checkCudaErrors(cudaMemcpy(ns, nslots[r&1], NBUCKETS * sizeof(u32), cudaMemcpyDeviceToHost));
u32 binsizes[65];
memset(binsizes, 0, 65 * sizeof(u32));
for (u32 bucketid = 0; bucketid < NBUCKETS; bucketid++) {
u32 bsize = min(ns[bucketid], NSLOTS) >> (SLOTBITS-6);
binsizes[bsize]++;
}
for (u32 i=0; i < 65; i++) {
#ifdef HIST
printf(" %d:%d", i, binsizes[i]);
#else
#ifdef SPARK
u32 sparks = binsizes[i] / SPARKSCALE;
#else
u32 sparks = 0;
for (u32 bs = binsizes[i]; bs; bs >>= 1) sparks++;
sparks = sparks * 7 / SPARKSCALE;
#endif
printf("\342\226%c", '\201' + sparks);
#endif
}
printf("\n");
#endif
}
// proper dupe test is a little costly on GPU, so allow false negatives
__device__ bool probdupe(u32 *prf) {
unsigned short susp[PROOFSIZE];
memset(susp, 0xffff, PROOFSIZE * sizeof(unsigned short));
for (u32 i=0; i<PROOFSIZE; i++) {
u32 bin = prf[i] & (PROOFSIZE-1);
unsigned short msb = prf[i]>>WK;
if (msb == susp[bin])
return true;
susp[bin] = msb;
}
return false;
}
struct htlayout {
htalloc hta;
u32 prevhashunits;
u32 nexthashunits;
u32 dunits;
u32 prevbo;
u32 nextbo;
__device__ htlayout(equi *eq, u32 r): hta(eq->hta), prevhashunits(0), dunits(0) {
u32 nexthashbytes = hashsize(r);
nexthashunits = hashwords(nexthashbytes);
prevbo = 0;
nextbo = nexthashunits * sizeof(hashunit) - nexthashbytes; // 0-3
if (r) {
u32 prevhashbytes = hashsize(r-1);
prevhashunits = hashwords(prevhashbytes);
prevbo = prevhashunits * sizeof(hashunit) - prevhashbytes; // 0-3
dunits = prevhashunits - nexthashunits;
}
}
__device__ u32 getxhash0(const slot0* pslot) const {
#ifdef XINTREE
return pslot->attr.xhash();
#elif WN == 200 && RESTBITS == 4
return pslot->hash->bytes[prevbo] >> 4;
#elif WN == 200 && RESTBITS == 8
return (pslot->hash->bytes[prevbo] & 0xf) << 4 | pslot->hash->bytes[prevbo+1] >> 4;
#elif WN == 144 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif WN == 200 && RESTBITS == 6
return (pslot->hash->bytes[prevbo] & 0x3) << 4 | pslot->hash->bytes[prevbo+1] >> 4;
#else
#error not implemented
#endif
}
__device__ u32 getxhash1(const slot1* pslot) const {
#ifdef XINTREE
return pslot->attr.xhash();
#elif WN == 200 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif WN == 200 && RESTBITS == 8
return pslot->hash->bytes[prevbo];
#elif WN == 144 && RESTBITS == 4
return pslot->hash->bytes[prevbo] & 0xf;
#elif WN == 200 && RESTBITS == 6
return pslot->hash->bytes[prevbo] &0x3f;
#else
#error not implemented
#endif
}
__device__ bool equal(const hashunit *hash0, const hashunit *hash1) const {
return hash0[prevhashunits-1].word == hash1[prevhashunits-1].word;
}
};
struct collisiondata {
#ifdef XBITMAP
#if NSLOTS > 64
#error cant use XBITMAP with more than 64 slots
#endif
u64 xhashmap[NRESTS];
u64 xmap;
#else
#if RESTBITS <= 6
typedef uchar xslot;
#else
typedef u16 xslot;
#endif
static const xslot xnil = ~0;
xslot xhashslots[NRESTS];
xslot nextxhashslot[NSLOTS];
xslot nextslot;
#endif
u32 s0;
__device__ void clear() {
#ifdef XBITMAP
memset(xhashmap, 0, NRESTS * sizeof(u64));
#else
memset(xhashslots, xnil, NRESTS * sizeof(xslot));
memset(nextxhashslot, xnil, NSLOTS * sizeof(xslot));
#endif
}
__device__ bool addslot(u32 s1, u32 xh) {
#ifdef XBITMAP
xmap = xhashmap[xh];
xhashmap[xh] |= (u64)1 << s1;
s0 = ~0;
return true;
#else
nextslot = xhashslots[xh];
nextxhashslot[s1] = nextslot;
xhashslots[xh] = s1;
return true;
#endif
}
__device__ bool nextcollision() const {
#ifdef XBITMAP
return xmap != 0;
#else
return nextslot != xnil;
#endif
}
__device__ u32 slot() {
#ifdef XBITMAP
const u32 ffs = __ffsll(xmap);
s0 += ffs; xmap >>= ffs;
#else
nextslot = nextxhashslot[s0 = nextslot];
#endif
return s0;
}
};
};
__global__ void digitH(equi *eq) {
uchar hash[HASHOUT];
blake2b_state state;
equi::htlayout htl(eq, 0);
const u32 hashbytes = hashsize(0);
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 block = id; block < NBLOCKS; block += eq->nthreads) {
state = eq->blake_ctx;
blake2b_gpu_hash(&state, block, hash, HASHOUT);
for (u32 i = 0; i<HASHESPERBLAKE; i++) {
const uchar *ph = hash + i * WN/8;
#if BUCKBITS == 16 && RESTBITS == 4
const u32 bucketid = ((u32)ph[0] << 8) | ph[1];
#ifdef XINTREE
const u32 xhash = ph[2] >> 4;
#endif
#elif BUCKBITS == 14 && RESTBITS == 6
const u32 bucketid = ((u32)ph[0] << 6) | ph[1] >> 2;
#elif BUCKBITS == 12 && RESTBITS == 8
const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
#elif BUCKBITS == 20 && RESTBITS == 4
const u32 bucketid = ((((u32)ph[0] << 8) | ph[1]) << 4) | ph[2] >> 4;
#ifdef XINTREE
const u32 xhash = ph[2] & 0xf;
#endif
#elif BUCKBITS == 12 && RESTBITS == 4
const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4;
const u32 xhash = ph[1] & 0xf;
#else
#error not implemented
#endif
const u32 slot = atomicAdd(&eq->nslots[0][bucketid], 1);
if (slot >= NSLOTS)
continue;
slot0 &s = eq->hta.trees0[0][bucketid][slot];
#ifdef XINTREE
s.attr = tree(block*HASHESPERBLAKE+i, xhash);
#else
s.attr = tree(block*HASHESPERBLAKE+i);
#endif
memcpy(s.hash->bytes+htl.nextbo, ph+WN/8-hashbytes, hashbytes);
}
}
}
__global__ void digitO(equi *eq, const u32 r) {
equi::htlayout htl(eq, r);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[(r-1)/2][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
u32 xorbucketid;
u32 xhash;
const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
#if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8)
| (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1])) << 4
| (xhash = bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4;
xhash &= 0xf;
#elif WN == 144 && BUCKBITS == 20 && RESTBITS == 4
xorbucketid = ((((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 8)
| (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2])) << 4)
| (xhash = bytes0[htl.prevbo+3] ^ bytes1[htl.prevbo+3]) >> 4;
xhash &= 0xf;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
xorbucketid = ((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 4)
| (xhash = bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4;
xhash &= 0xf;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
xorbucketid = ((((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) & 0xf) << 8)
| (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2])) << 2
| (bytes0[htl.prevbo+3] ^ bytes1[htl.prevbo+3]) >> 6;
#else
#error not implemented
#endif
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[r/2][xorbucketid][xorslot];
#ifdef XINTREE
xs.attr = tree(bucketid, s0, s1, xhash);
#else
xs.attr = tree(bucketid, s0, s1);
#endif
for (u32 i=htl.dunits; i < htl.prevhashunits; i++)
xs.hash[i-htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
}
}
}
}
__global__ void digitE(equi *eq, const u32 r) {
equi::htlayout htl(eq, r);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[(r-1)/2][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
u32 xorbucketid;
const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes;
#if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE)
xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8)
| (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]);
u32 xhash = (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4;
#elif WN == 144 && BUCKBITS == 20 && RESTBITS == 4
xorbucketid = ((((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 8)
| (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2])) << 4)
| (bytes0[htl.prevbo+3] ^ bytes1[htl.prevbo+3]) >> 4;
#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4
xorbucketid = ((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 4)
| (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4;
#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6
xorbucketid = ((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 6)
| (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 2;
#else
#error not implemented
#endif
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[r/2][xorbucketid][xorslot];
#ifdef XINTREE
xs.attr = tree(bucketid, s0, s1, xhash);
#else
xs.attr = tree(bucketid, s0, s1);
#endif
for (u32 i=htl.dunits; i < htl.prevhashunits; i++)
xs.hash[i-htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word;
}
}
}
}
#ifdef UNROLL
// bucket mask
static const u32 BUCKMASK = NBUCKETS-1;
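// Unrolled per-round kernels for the (200,9) XINTREE layout: each digitN below
// hard-codes the hash-word count and __byte_perm selector for its round.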
__global__ void digit_1(equi *eq) {
equi::htlayout htl(eq, 1);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[0][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x0123);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[0][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
xs.hash[4].word = pslot0->hash[5].word ^ pslot1->hash[5].word;
}
}
}
}
__global__ void digit2(equi *eq) {
equi::htlayout htl(eq, 2);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[0][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x0123);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[1][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[4].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
}
}
}
}
__global__ void digit3(equi *eq) {
equi::htlayout htl(eq, 3);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[1][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x1234);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[1][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word;
}
}
}
}
__global__ void digit4(equi *eq) {
equi::htlayout htl(eq, 4);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[1][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x4123);
const u32 xorbucketid = bexor >> 8;
const u32 xhash = bexor >> 4 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[2][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
}
}
}
}
__global__ void digit5(equi *eq) {
equi::htlayout htl(eq, 5);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[2][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[2][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word;
}
}
}
}
__global__ void digit6(equi *eq) {
equi::htlayout htl(eq, 6);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[2][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x2345);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[3][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word;
}
}
}
}
__global__ void digit7(equi *eq) {
equi::htlayout htl(eq, 7);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[3][bucketid];
u32 bsize = eq->getnslots0(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1)))
continue;
for (; cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 bexor = __byte_perm(xor0, 0, 0x4012);
const u32 xorbucketid = bexor >> 4 & BUCKMASK;
const u32 xhash = bexor & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot1 &xs = htl.hta.trees1[3][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor0;
xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word;
}
}
}
}
__global__ void digit8(equi *eq) {
equi::htlayout htl(eq, 8);
equi::collisiondata cd;
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot1 *buck = htl.hta.trees1[3][bucketid];
u32 bsize = eq->getnslots1(bucketid);
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot1 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash1(pslot1)))
continue;
for (; cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot1 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash))
continue;
const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word;
const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word;
const u32 bexor = __byte_perm(xor0, xor1, 0x3456);
const u32 xorbucketid = bexor >> 16;
const u32 xhash = bexor >> 12 & 0xf;
const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1);
if (xorslot >= NSLOTS)
continue;
slot0 &xs = htl.hta.trees0[4][xorbucketid][xorslot];
xs.attr = tree(bucketid, s0, s1, xhash);
xs.hash[0].word = xor1;
}
}
}
}
#endif
__global__ void digitK(equi *eq) {
equi::collisiondata cd;
equi::htlayout htl(eq, WK);
const u32 id = blockIdx.x * blockDim.x + threadIdx.x;
for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) {
cd.clear();
slot0 *buck = htl.hta.trees0[(WK-1)/2][bucketid];
u32 bsize = eq->getnslots0(bucketid); // assume WK odd
for (u32 s1 = 0; s1 < bsize; s1++) {
const slot0 *pslot1 = buck + s1;
if (!cd.addslot(s1, htl.getxhash0(pslot1))) // assume WK odd
continue;
for (; cd.nextcollision(); ) {
const u32 s0 = cd.slot();
const slot0 *pslot0 = buck + s0;
if (htl.equal(pslot0->hash, pslot1->hash)) {
#ifdef XINTREE
eq->candidate(tree(bucketid, s0, s1, 0));
#else
eq->candidate(tree(bucketid, s0, s1));
#endif
}
}
}
}
}
void runEquihash(int nthreads, int tpb, equi* eq, equi* device_eq, proof* sols) {
printf("Digit 0\n");
digitH<<<nthreads / tpb, tpb>>>(device_eq);
#if BUCKBITS == 16 && RESTBITS == 4 && defined XINTREE && defined(UNROLL)
printf("Digit %d\n", 1);
digit_1<<<nthreads / tpb, tpb>>>(device_eq);
printf("Digit %d\n", 2);
digit2<<<nthreads / tpb, tpb>>>(device_eq);
printf("Digit %d\n", 3);
digit3<<<nthreads / tpb, tpb>>>(device_eq);
printf("Digit %d\n", 4);
digit4<<<nthreads / tpb, tpb>>>(device_eq);
printf("Digit %d\n", 5);
digit5<<<nthreads / tpb, tpb>>>(device_eq);
printf("Digit %d\n", 6);
digit6<<<nthreads / tpb, tpb>>>(device_eq);
printf("Digit %d\n", 7);
digit7<<<nthreads / tpb, tpb>>>(device_eq);
printf("Digit %d\n", 8);
digit8<<<nthreads / tpb, tpb>>>(device_eq);
#else
for (u32 r = 1; r < WK; r++) {
printf("Digit %d\n", r);
r & 1 ? digitO<<<nthreads / tpb, tpb>>>(device_eq, r)
: digitE<<<nthreads / tpb, tpb>>>(device_eq, r);
eq->showbsizes(r);
}
#endif
printf("Digit %d\n", WK);
digitK<<<nthreads / tpb, tpb>>>(device_eq);
checkCudaErrors(cudaMemcpy(eq, device_eq, sizeof(equi), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(sols, eq->sols, MAXSOLS * sizeof(proof), cudaMemcpyDeviceToHost));
} |
39b6358ebf5110effb1f8d9ce0a6fb9d8495f2e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <rocblas.h>
#include "cudamat_kernels.cuh"
#include "cudamat.cuh"
extern "C" {
/* ------------------------------ CUBLAS init/shutdown ------------------------------ */
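// Thin error-checking wrappers around the hipBLAS / HIP runtime entry points used below.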
inline bool check_cublas_error() {
cublasStatus status = hipblasGetError();
return status != HIPBLAS_STATUS_SUCCESS;
}
inline bool checkCUDAError() {
hipError_t err = hipGetLastError();
if (hipSuccess != err)
printf("%s\n", hipGetErrorString( err));
return hipSuccess != err;
}
EXPORT const char* get_last_cuda_error() {
hipError_t err = hipGetLastError();
return hipGetErrorString( err);
}
EXPORT const char* get_last_clib_error() {
return strerror(errno);
}
EXPORT int cublas_init() {
return hipblasInit();
}
EXPORT int cublas_shutdown() {
hipblasShutdown();
hipDeviceReset();
return 0;
}
EXPORT int cuda_set_device(int deviceId) {
hipSetDevice(deviceId);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
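// Read one multiplier per RNG stream from the file at cudamatpath, copy them to the
// device, and seed the per-stream word state with kSeedRandom.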
EXPORT int init_random(rnd_struct* rnd_state, int seed, char* cudamatpath) {
unsigned int * host_mults;
host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int));
FILE * pFile;
pFile = fopen (cudamatpath,"r");
if (pFile == NULL) {
return ERROR_FILE_OPEN;
}
for (int i = 0; i < NUM_RND_STREAMS; i++) {
if (fscanf (pFile, "%u", &host_mults[i]) != 1) {
return ERROR_FILE_SCAN;
}
}
fclose (pFile);
hipblasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults);
hipblasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words);
hipblasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1);
//hipMalloc((void **)&rnd_state->dev_mults, NUM_RND_STREAMS * sizeof(unsigned int));
//hipMalloc((void **)&rnd_state->dev_words, NUM_RND_STREAMS * sizeof(unsigned long long));
//hipMemcpy(rnd_state->dev_mults, host_mults, NUM_RND_STREAMS * sizeof(unsigned int), hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipLaunchKernelGGL(( kSeedRandom), dim3(NUM_RND_BLOCKS), dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, seed);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* ------------------------------ Utility routines ------------------------------ */
EXPORT int get_leading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[1] : mat->size[0];
}
EXPORT int get_nonleading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[0] : mat->size[1];
}
EXPORT void set_transpose(cudamat* mat, int is_trans) {
mat->is_trans = is_trans;
}
inline char get_transpose_char(cudamat* mat) {
return mat->is_trans ? 't' : 'n';
}
EXPORT void cuda_sync_threads() {
hipDeviceSynchronize();
}
/* ------------------------------ Allocating/moving data ------------------------------ */
EXPORT int allocate_device_memory(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
cublasStatus stat;
stat = hipblasAlloc(len, sizeof(mat->data_device[0]), (void**)&mat->data_device);
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
EXPORT int copy_to_host(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
if (mat->on_device) {
hipblasGetVector(len, sizeof(mat->data_host[0]), mat->data_device, 1, mat->data_host, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else
return ERROR_NOT_ON_DEVICE;
return 0;
}
EXPORT int copy_to_device(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory(mat);
if (err_code)
return err_code;
}
hipblasSetVector(len, sizeof(mat->data_host[0]), mat->data_host, 1, mat->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
EXPORT int copy_on_device(cudamat* mat1, cudamat* mat2) {
int len = mat1->size[0]*mat1->size[1];
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipblasScopy(len, mat1->data_device, 1, mat2->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
EXPORT int get_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = source->size[0];
int width = source->size[1];
if ((end - start) != target->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
hipLaunchKernelGGL(( kGetRowSlice), dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, source->data_device, target->data_device, start, end, width, height);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
EXPORT int set_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = target->size[0];
int width = target->size[1];
if ((end - start) != source->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
hipLaunchKernelGGL(( kSetRowSlice), dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, source->data_device, target->data_device, start, end, width, height);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
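// Out-of-place transpose; the grid is tiled in COPY_BLOCK_SIZE x COPY_BLOCK_SIZE blocks.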
EXPORT int copy_transpose(cudamat* source, cudamat* target) {
unsigned int height = source->size[0];
unsigned int width = source->size[1];
if (source->size[0] != target->size[1] || source->size[1] != target->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
// setup execution parameters
unsigned int grid_x = height / COPY_BLOCK_SIZE;
if (height % COPY_BLOCK_SIZE)
grid_x++;
unsigned int grid_y = width / COPY_BLOCK_SIZE;
if (width % COPY_BLOCK_SIZE)
grid_y++;
dim3 grid(grid_x, grid_y, 1);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
hipLaunchKernelGGL(( kTranspose), dim3(grid), dim3(threads) , 0, 0, target->data_device, source->data_device, height, width);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
EXPORT int free_device_memory(cudamat* mat) {
if (mat->owns_data && mat->on_device) {
cublasStatus stat;
stat = hipblasFree(mat->data_device);
mat->on_device = 0;
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error())
return CUBLAS_ERROR;
}
return 0;
}
EXPORT int reshape(cudamat* mat, unsigned int m, unsigned int n) {
if (mat->size[0] * mat->size[1] != m * n)
return ERROR_INCOMPATIBLE_DIMENSIONS;
mat->size[0] = m;
mat->size[1] = n;
return 0;
}
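// Create a view onto a contiguous range of columns: no data is copied and the
// view does not own the device memory (owns_data = 0).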
EXPORT int get_slice(cudamat* source, cudamat* target, unsigned int first_col, unsigned int last_col) {
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (last_col > source->size[1] || (first_col >= last_col))
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = 0;
target->data_device = source->data_device + first_col * num_rows;
target->on_device = 1;
target->on_host = 0;
target->size[0] = source->size[0];
target->size[1] = last_col - first_col;
target->is_trans = 0;
target->owns_data = 0;
return 0;
}
EXPORT int get_vector_slice(cudamat* source, cudamat* target, unsigned int first_ind, unsigned int last_ind) {
// source must be a vector
if (source->size[0] > 1 && source->size[1] > 1)
return ERROR_GENERIC;
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (first_ind >= last_ind)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = 0;
target->data_device = source->data_device + first_ind * num_rows;
target->on_device = 1;
target->on_host = 0;
target->is_trans = 0;
target->owns_data = 0;
if (source->size[0] > 1) {
if (last_ind > source->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = last_ind - first_ind;
target->size[1] = 1;
} else {
if (last_ind > source->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = 1;
target->size[1] = last_ind - first_ind;
}
return 0;
}
/* ------------------------------ Initialization routines ------------------------------ */
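// init_from_array wraps an existing host buffer without copying; init_empty only allocates device memory.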
EXPORT void init_from_array(cudamat* mat, float* data, int m, int n) {
mat->data_host = data;
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 1;
mat->is_trans = 0;
mat->owns_data = 1;
}
EXPORT int init_empty(cudamat* mat, int m, int n) {
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 0;
mat->is_trans = 0;
mat->owns_data = 1;
return allocate_device_memory(mat);
}
/* ------------------------------ Random number generation ------------------------------ */
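// Both fill routines consume the per-stream multiplier/word state created by init_random.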
EXPORT int fill_with_rand(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kRandomUniform), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
EXPORT int fill_with_randn(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kRandomGaussian), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* ------------------------------ Algebraic operations ------------------------------ */
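// Matrices are stored column-major: the *_col_vec routines broadcast a column vector
// across all columns, the *_row_vec routines broadcast a row vector across all rows.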
EXPORT int add_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddColVector), dim3(NUM_VECTOR_OP_BLOCKS(w*h)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError()) {
return CUDA_ERROR;
}
return 0;
}
EXPORT int add_col_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddColMult), dim3(NUM_VECTOR_OP_BLOCKS(w*h)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)), 0, 0, mat->data_device, vec->data_device, target->data_device, mult, w, h);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int add_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddRowVector), dim3(NUM_VECTOR_OP_BLOCKS(w*h)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int mult_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMultByColVector), dim3(NUM_VECTOR_OP_BLOCKS(w*h)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int mult_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMultByRowVector), dim3(NUM_VECTOR_OP_BLOCKS(w*h)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int divide_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kDivByColVector), dim3(NUM_VECTOR_OP_BLOCKS(w*h)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int divide_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kDivByRowVector), dim3(NUM_VECTOR_OP_BLOCKS(w*h)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int less_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLessThan), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int less_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLessThanScalar), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, val, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int greater_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kGreaterThan), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int greater_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kGreaterThanScalar), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, val, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int equals(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kEquals), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int equals_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kEqualsScalar), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, val, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int minimum(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMinimum), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int minimum_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMinimumScalar), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, val, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int maximum(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMaximum), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int maximum_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMaximumScalar), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, val, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
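// Axis reductions: axis == 0 reduces over rows (one block per column),
// otherwise over columns (one block per row).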
EXPORT int min_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMinColumnwise), dim3(w),dim3(32), 0, 0, mat->data_device, target->data_device, w, h);
} else {
if (target->size[1] != 1 || target->size[0] != mat->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMinRowwise), dim3(h),dim3(32), 0, 0, mat->data_device, target->data_device, w, h);
}
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int max_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMaxColumnwise), dim3(w),dim3(32), 0, 0, mat->data_device, target->data_device, w, h);
} else {
if (target->size[1] != 1 || target->size[0] != mat->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMaxRowwise), dim3(h),dim3(32), 0, 0, mat->data_device, target->data_device, w, h);
}
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int argmin_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kArgMinColumnwise), dim3(w),dim3(32), 0, 0, mat->data_device, target->data_device, w, h);
} else {
if (target->size[1] != 1 || target->size[0] != mat->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kArgMinRowwise), dim3(h),dim3(32), 0, 0, mat->data_device, target->data_device, w, h);
}
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int argmax_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kArgMaxColumnwise), dim3(w),dim3(32), 0, 0, mat->data_device, target->data_device, w, h);
} else {
if (target->size[1] != 1 || target->size[0] != mat->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kArgMaxRowwise), dim3(h),dim3(32), 0, 0, mat->data_device, target->data_device, w, h);
}
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int sign(cudamat* mat, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSign), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_sigmoid(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplySigmoid), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_tanh(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplyTanh), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_soft_threshold(cudamat* mat, float alpha, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplySoftThreshold), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, alpha, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_abs(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplyAbs), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_log_1_plus_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplyLog1PlusExp), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_log(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLog), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kExp), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_gamma(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kGamma), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_lgamma(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLogGamma), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_sqrt(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSqrt), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_pow(cudamat* mat, float pow, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kPow), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, pow, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_pow_matrix(cudamat* mat, cudamat* pow, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kPowMatrix), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, pow->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int reciprocal(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kReciprocal), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int dot(cudamat* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (get_leading_dimension(mat1) != get_leading_dimension(target) ||
get_nonleading_dimension(mat2) != get_nonleading_dimension(target) ||
get_nonleading_dimension(mat1) != get_leading_dimension(mat2)) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
int m = get_leading_dimension(mat1),
k = get_leading_dimension(mat2),
n = get_nonleading_dimension(mat2);
// gemv if second matrix is a (column) vector
if (n == 1) {
hipblasSgemv(get_transpose_char(mat1), mat1->size[0], mat1->size[1],
alpha, mat1->data_device, mat1->size[0],
mat2->data_device, 1,
beta, target->data_device, 1);
}
// gemv if first matrix is a (row) vector
else if (m == 1) {
hipblasSgemv(mat2->is_trans ? 'n' : 't', mat2->size[0], mat2->size[1],
alpha, mat2->data_device, mat2->size[0],
mat1->data_device, 1,
beta, target->data_device, 1);
}
// gemm otherwise
else {
hipblasSgemm(get_transpose_char(mat1), get_transpose_char(mat2),
m, n, k,
alpha, mat1->data_device, mat1->size[0],
mat2->data_device, mat2->size[0],
beta, target->data_device, target->size[0]);
}
if (check_cublas_error())
return CUBLAS_ERROR;
if (SYNC_THREADS)
hipDeviceSynchronize();
return 0;
}
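/* Usage sketch (not part of the library; A, B, C are illustrative matrices
   already on the device): note the argument order, beta before alpha, so the
   call computes target = beta * target + alpha * mat1 * mat2, the usual BLAS
   gemm convention. With A (m x k), B (k x n) and C (m x n):

       dot(&A, &B, &C, 0.0f, 1.0f);            -- C = A * B
       dot(&A, &B, &C, 1.0f, 0.5f);            -- C = C + 0.5 * A * B
*/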
EXPORT float vdot(cudamat* mat1, cudamat* mat2, int* err_code) {
int len = mat1->size[0]*mat1->size[1];
float res;
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans) {
*err_code = ERROR_TRANSPOSEDNESS;
return 0;
}
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) {
*err_code = ERROR_INCOMPATIBLE_DIMENSIONS;
return 0;
}
res = hipblasSdot(len, mat1->data_device, 1, mat2->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
/* Perform the operation mat1 = mat1 + alpha * mat2. mat1 and mat2 must
have the same transposedness. */
EXPORT int add_mult(cudamat* mat1, cudamat* mat2, float alpha) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipblasSaxpy(len, alpha, mat2->data_device, 1, mat1->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
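/* Usage sketch (not part of the library), matching the comment above:

       add_mult(&mat1, &mat2, 0.5f);           -- mat1 = mat1 + 0.5 * mat2
*/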
EXPORT int add_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat1 == target) {
hipblasSaxpy(len, 1, mat2->data_device, 1, mat1->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else {
hipLaunchKernelGGL(( kAdd), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
}
return 0;
}
EXPORT int subtract_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSubtract), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int divide_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kDivide), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise multiplication of 2 matrices */
EXPORT int mult_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMult), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int assign_scalar(cudamat* mat, float alpha) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kAssignScalar), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, alpha, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int mult_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat == target) {
hipblasSscal(len, alpha, mat->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else {
hipLaunchKernelGGL(( kMultScalar), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, alpha, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
}
return 0;
}
EXPORT int divide_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kDivideScalar), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, alpha, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int add_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddScalar), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, mat->data_device, alpha, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT float euclid_norm(cudamat* mat, int* err_code) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device) {
*err_code = ERROR_NOT_ON_DEVICE;
return -1.;
}
float res = hipblasSnrm2(len, mat->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
EXPORT float manhattan_norm(cudamat* mat, int* err_code) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device) {
*err_code = ERROR_NOT_ON_DEVICE;
return -1.;
}
float res = hipblasSasum(len, mat->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
EXPORT int selectRows(cudamat* source, cudamat* target, cudamat* indices){
const int nRetRows = indices->size[1];
if (nRetRows==0) return 0;
dim3 gridDim((nRetRows+31)/32);
dim3 blockDim(32);
hipLaunchKernelGGL(( kSelectRows), dim3(gridDim), dim3(blockDim), 0, 0, source->data_device, target->data_device, indices->data_device, nRetRows, source->size[0], source->size[1]);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
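/* Note: unlike most routines above, selectRows and setSelectedRows do not
   validate on_device flags or target dimensions, so the caller must get them
   right. indices is an ordinary cudamat, i.e. the row numbers are stored as
   floats in a 1 x nRetRows matrix; the target is assumed here to provide one
   row of the source's width per selected index. A hypothetical call, with
   illustrative names:

       cudamat idx, rows;
       init_empty(&rows, 3, src.size[1]);      -- one output row per selected index
       selectRows(&src, &rows, &idx);          -- idx holds {0, 2, 5} as floats, 1 x 3
*/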
EXPORT int setSelectedRows(cudamat* target, cudamat* source, cudamat* indices){
const int nSetRows = indices->size[1];
if (nSetRows==0)
return 0;
dim3 gridDim((nSetRows+31)/32);
dim3 blockDim(32);
hipLaunchKernelGGL(( kSetSelectedRows), dim3(gridDim), dim3(blockDim), 0, 0, target->data_device, source->data_device, indices->data_device, nSetRows, target->size[0], target->size[1]);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
EXPORT int where(cudamat* condition_mat, cudamat* if_mat, cudamat* else_mat, cudamat* target) {
unsigned int len = condition_mat->size[0] * condition_mat->size[1];
if (!condition_mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (condition_mat->size[0] != target->size[0] || condition_mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (condition_mat->size[0] != if_mat->size[0] || condition_mat->size[1] != if_mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (condition_mat->size[0] != else_mat->size[0] || condition_mat->size[1] != else_mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kWhere), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, condition_mat->data_device,
if_mat->data_device, else_mat->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
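/* Usage sketch (not part of the library; cond, a, b, out are illustrative).
   The elementwise semantics assumed here are
   target[i] = condition[i] ? if_mat[i] : else_mat[i], with all four matrices
   of identical shape, as enforced by the checks above:

       where(&cond, &a, &b, &out);             -- out[i] = cond[i] ? a[i] : b[i]
*/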
EXPORT int correlate(cudamat* source, cudamat* kernel, cudamat* dest) {
int len = source->size[0] * source->size[1];
if (!source->on_device || !kernel->on_device || !dest->on_device)
return ERROR_NOT_ON_DEVICE;
if (source->size[0] != dest->size[0] || source->size[1] != dest->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (kernel->size[0] % 2 == 0 || kernel->size[1] % 2 == 0 ||
kernel->size[0] > source->size[0] || kernel->size[1] > source->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kCorrelate), dim3(NUM_VECTOR_OP_BLOCKS(len)),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK(len)), 0, 0, source->data_device,
kernel->data_device, dest->data_device, source->size[1], source->size[0],
kernel->size[1], kernel->size[0]);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
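/* Note: the checks above require the kernel to be odd-sized in both dimensions
   and no larger than the source. For a 10 x 10 source, a 3 x 3 or 5 x 1 kernel
   is accepted, while a 4 x 3 kernel (even dimension) or an 11 x 3 kernel
   (larger than the source) is rejected with ERROR_INCOMPATIBLE_DIMENSIONS.
*/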
}
| 39b6358ebf5110effb1f8d9ce0a6fb9d8495f2e1.cu | #include <stdio.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <cublas.h>
#include "cudamat_kernels.cuh"
#include "cudamat.cuh"
extern "C" {
/* ------------------------------ CUBLAS init/shutdown ------------------------------ */
inline bool check_cublas_error() {
cublasStatus status = cublasGetError();
return status != CUBLAS_STATUS_SUCCESS;
}
inline bool checkCUDAError() {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
printf("%s\n", cudaGetErrorString( err));
return cudaSuccess != err;
}
EXPORT const char* get_last_cuda_error() {
cudaError_t err = cudaGetLastError();
return cudaGetErrorString( err);
}
EXPORT const char* get_last_clib_error() {
return strerror(errno);
}
EXPORT int cublas_init() {
return cublasInit();
}
EXPORT int cublas_shutdown() {
cublasShutdown();
cudaThreadExit();
return 0;
}
EXPORT int cuda_set_device(int deviceId) {
cudaSetDevice(deviceId);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
EXPORT int init_random(rnd_struct* rnd_state, int seed, char* cudamatpath) {
unsigned int * host_mults;
host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int));
FILE * pFile;
pFile = fopen (cudamatpath,"r");
    if (pFile == NULL) {
        free(host_mults);
        return ERROR_FILE_OPEN;
    }
    for (int i = 0; i < NUM_RND_STREAMS; i++) {
        if (fscanf (pFile, "%u", &host_mults[i]) != 1) {
            fclose(pFile);
            free(host_mults);
            return ERROR_FILE_SCAN;
        }
    }
    fclose (pFile);
    cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults);
    cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words);
    cublasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1);
    free(host_mults);   // the host copy is no longer needed once uploaded to the device
//cudaMalloc((void **)&rnd_state->dev_mults, NUM_RND_STREAMS * sizeof(unsigned int));
//cudaMalloc((void **)&rnd_state->dev_words, NUM_RND_STREAMS * sizeof(unsigned long long));
//cudaMemcpy(rnd_state->dev_mults, host_mults, NUM_RND_STREAMS * sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaThreadSynchronize();
kSeedRandom<<<NUM_RND_BLOCKS, NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, seed);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* ------------------------------ Utility routines ------------------------------ */
EXPORT int get_leading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[1] : mat->size[0];
}
EXPORT int get_nonleading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[0] : mat->size[1];
}
EXPORT void set_transpose(cudamat* mat, int is_trans) {
mat->is_trans = is_trans;
}
inline char get_transpose_char(cudamat* mat) {
return mat->is_trans ? 't' : 'n';
}
EXPORT void cuda_sync_threads() {
cudaThreadSynchronize();
}
/* ------------------------------ Allocating/moving data ------------------------------ */
EXPORT int allocate_device_memory(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
cublasStatus stat;
stat = cublasAlloc(len, sizeof(mat->data_device[0]), (void**)&mat->data_device);
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
EXPORT int copy_to_host(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
if (mat->on_device) {
cublasGetVector(len, sizeof(mat->data_host[0]), mat->data_device, 1, mat->data_host, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else
return ERROR_NOT_ON_DEVICE;
return 0;
}
EXPORT int copy_to_device(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory(mat);
if (err_code)
return err_code;
}
cublasSetVector(len, sizeof(mat->data_host[0]), mat->data_host, 1, mat->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
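/* Usage sketch (not part of the library; names are illustrative): the typical
   round trip is to wrap a host buffer with init_from_array, push it to the
   device, operate, and pull the result back into the same host buffer:

       float data[6] = {1, 2, 3, 4, 5, 6};
       cudamat A;
       init_from_array(&A, data, 3, 2);        -- 3 rows, 2 columns, host side only
       copy_to_device(&A);                     -- allocates device memory and uploads
       apply_sigmoid(&A, &A);                  -- any of the operations defined below
       copy_to_host(&A);                       -- downloads the result back into data
*/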
EXPORT int copy_on_device(cudamat* mat1, cudamat* mat2) {
int len = mat1->size[0]*mat1->size[1];
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
cublasScopy(len, mat1->data_device, 1, mat2->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
EXPORT int get_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = source->size[0];
int width = source->size[1];
if ((end - start) != target->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
kGetRowSlice<<<kernelBlockGrid,kernelBlockDim>>>(source->data_device, target->data_device, start, end, width, height);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
EXPORT int set_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = target->size[0];
int width = target->size[1];
if ((end - start) != source->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
kSetRowSlice<<<kernelBlockGrid,kernelBlockDim>>>(source->data_device, target->data_device, start, end, width, height);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
EXPORT int copy_transpose(cudamat* source, cudamat* target) {
unsigned int height = source->size[0];
unsigned int width = source->size[1];
if (source->size[0] != target->size[1] || source->size[1] != target->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
// setup execution parameters
unsigned int grid_x = height / COPY_BLOCK_SIZE;
if (height % COPY_BLOCK_SIZE)
grid_x++;
unsigned int grid_y = width / COPY_BLOCK_SIZE;
if (width % COPY_BLOCK_SIZE)
grid_y++;
dim3 grid(grid_x, grid_y, 1);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
kTranspose<<< grid, threads >>>(target->data_device, source->data_device, height, width);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
EXPORT int free_device_memory(cudamat* mat) {
if (mat->owns_data && mat->on_device) {
cublasStatus stat;
stat = cublasFree(mat->data_device);
mat->on_device = 0;
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error())
return CUBLAS_ERROR;
}
return 0;
}
EXPORT int reshape(cudamat* mat, unsigned int m, unsigned int n) {
if (mat->size[0] * mat->size[1] != m * n)
return ERROR_INCOMPATIBLE_DIMENSIONS;
mat->size[0] = m;
mat->size[1] = n;
return 0;
}
EXPORT int get_slice(cudamat* source, cudamat* target, unsigned int first_col, unsigned int last_col) {
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (last_col > source->size[1] || (first_col >= last_col))
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = 0;
target->data_device = source->data_device + first_col * num_rows;
target->on_device = 1;
target->on_host = 0;
target->size[0] = source->size[0];
target->size[1] = last_col - first_col;
target->is_trans = 0;
target->owns_data = 0;
return 0;
}
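/* Note: get_slice returns a view, not a copy -- target->data_device points into
   the source's buffer and owns_data is 0, so free_device_memory on the view is
   a no-op and writes through the view are visible in the source matrix. With an
   illustrative A that has at least three columns:

       cudamat cols;
       get_slice(&A, &cols, 1, 3);             -- columns 1 and 2 of A, no copy
       assign_scalar(&cols, 0.0f);             -- zeroes those columns of A as well
*/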
EXPORT int get_vector_slice(cudamat* source, cudamat* target, unsigned int first_ind, unsigned int last_ind) {
// source must be a vector
if (source->size[0] > 1 && source->size[1] > 1)
return ERROR_GENERIC;
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (first_ind >= last_ind)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = 0;
target->data_device = source->data_device + first_ind * num_rows;
target->on_device = 1;
target->on_host = 0;
target->is_trans = 0;
target->owns_data = 0;
if (source->size[0] > 1) {
if (last_ind > source->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = last_ind - first_ind;
target->size[1] = 1;
} else {
if (last_ind > source->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = 1;
target->size[1] = last_ind - first_ind;
}
return 0;
}
/* ------------------------------ Initialization routines ------------------------------ */
EXPORT void init_from_array(cudamat* mat, float* data, int m, int n) {
mat->data_host = data;
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 1;
mat->is_trans = 0;
mat->owns_data = 1;
}
EXPORT int init_empty(cudamat* mat, int m, int n) {
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 0;
mat->is_trans = 0;
mat->owns_data = 1;
return allocate_device_memory(mat);
}
/* ------------------------------ Random number generation ------------------------------ */
EXPORT int fill_with_rand(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kRandomUniform<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
EXPORT int fill_with_randn(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kRandomGaussian<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
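/* Usage sketch (not part of the library; names are illustrative): random
   generation requires init_random first, which loads NUM_RND_STREAMS multiplier
   constants from a text file. The path below is a placeholder -- point it at
   whatever multiplier file ships with your build.

       rnd_struct rnd;
       char path[] = "rnd_multipliers_32bit.txt";
       init_random(&rnd, 42, path);            -- seed = 42
       fill_with_rand(&rnd, &A);               -- uniform samples
       fill_with_randn(&rnd, &A);              -- Gaussian samples
*/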
/* ------------------------------ Algebraic operations ------------------------------ */
EXPORT int add_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddColVector<<<NUM_VECTOR_OP_BLOCKS(w*h),NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError()) {
return CUDA_ERROR;
}
return 0;
}
EXPORT int add_col_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddColMult<<<NUM_VECTOR_OP_BLOCKS(w*h),NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)>>>(mat->data_device, vec->data_device, target->data_device, mult, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int add_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddRowVector<<<NUM_VECTOR_OP_BLOCKS(w*h),NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int mult_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultByColVector<<<NUM_VECTOR_OP_BLOCKS(w*h),NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int mult_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultByRowVector<<<NUM_VECTOR_OP_BLOCKS(w*h),NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int divide_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kDivByColVector<<<NUM_VECTOR_OP_BLOCKS(w*h),NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int divide_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kDivByRowVector<<<NUM_VECTOR_OP_BLOCKS(w*h),NUM_VECTOR_OP_THREADS_PER_BLOCK(w*h)>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int less_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLessThan<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int less_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLessThanScalar<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, val, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int greater_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kGreaterThan<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int greater_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kGreaterThanScalar<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, val, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int equals(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kEquals<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int equals_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kEqualsScalar<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, val, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int minimum(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMinimum<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat1->data_device, mat2->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int minimum_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMinimumScalar<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, val, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int maximum(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMaximum<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat1->data_device, mat2->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int maximum_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMaximumScalar<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, val, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int min_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMinColumnwise<<<w,32>>>(mat->data_device, target->data_device, w, h);
} else {
if (target->size[1] != 1 || target->size[0] != mat->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMinRowwise<<<h,32>>>(mat->data_device, target->data_device, w, h);
}
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int max_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMaxColumnwise<<<w,32>>>(mat->data_device, target->data_device, w, h);
} else {
if (target->size[1] != 1 || target->size[0] != mat->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMaxRowwise<<<h,32>>>(mat->data_device, target->data_device, w, h);
}
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int argmin_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kArgMinColumnwise<<<w,32>>>(mat->data_device, target->data_device, w, h);
} else {
if (target->size[1] != 1 || target->size[0] != mat->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kArgMinRowwise<<<h,32>>>(mat->data_device, target->data_device, w, h);
}
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int argmax_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kArgMaxColumnwise<<<w,32>>>(mat->data_device, target->data_device, w, h);
} else {
if (target->size[1] != 1 || target->size[0] != mat->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kArgMaxRowwise<<<h,32>>>(mat->data_device, target->data_device, w, h);
}
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int sign(cudamat* mat, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSign<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_sigmoid(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplySigmoid<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_tanh(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyTanh<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_soft_threshold(cudamat* mat, float alpha, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplySoftThreshold<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, alpha, target->data_device, len);
    if (SYNC_THREADS)
        cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_abs(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyAbs<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_log_1_plus_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyLog1PlusExp<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_log(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLog<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kExp<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_gamma(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kGamma<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_lgamma(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLogGamma<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_sqrt(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSqrt<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_pow(cudamat* mat, float pow, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kPow<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, pow, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int apply_pow_matrix(cudamat* mat, cudamat* pow, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kPowMatrix<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, pow->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int reciprocal(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kReciprocal<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int dot(cudamat* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (get_leading_dimension(mat1) != get_leading_dimension(target) ||
get_nonleading_dimension(mat2) != get_nonleading_dimension(target) ||
get_nonleading_dimension(mat1) != get_leading_dimension(mat2)) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
int m = get_leading_dimension(mat1),
k = get_leading_dimension(mat2),
n = get_nonleading_dimension(mat2);
// gemv if second matrix is a (column) vector
if (n == 1) {
cublasSgemv(get_transpose_char(mat1), mat1->size[0], mat1->size[1],
alpha, mat1->data_device, mat1->size[0],
mat2->data_device, 1,
beta, target->data_device, 1);
}
// gemv if first matrix is a (row) vector
else if (m == 1) {
cublasSgemv(mat2->is_trans ? 'n' : 't', mat2->size[0], mat2->size[1],
alpha, mat2->data_device, mat2->size[0],
mat1->data_device, 1,
beta, target->data_device, 1);
}
// gemm otherwise
else {
cublasSgemm(get_transpose_char(mat1), get_transpose_char(mat2),
m, n, k,
alpha, mat1->data_device, mat1->size[0],
mat2->data_device, mat2->size[0],
beta, target->data_device, target->size[0]);
}
if (check_cublas_error())
return CUBLAS_ERROR;
if (SYNC_THREADS)
cudaThreadSynchronize();
return 0;
}
EXPORT float vdot(cudamat* mat1, cudamat* mat2, int* err_code) {
int len = mat1->size[0]*mat1->size[1];
float res;
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans) {
*err_code = ERROR_TRANSPOSEDNESS;
return 0;
}
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) {
*err_code = ERROR_INCOMPATIBLE_DIMENSIONS;
return 0;
}
res = cublasSdot(len, mat1->data_device, 1, mat2->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
/* Perform the operation mat1 = mat1 + alpha * mat2. mat1 and mat2 must
have the same transposedness. */
EXPORT int add_mult(cudamat* mat1, cudamat* mat2, float alpha) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
cublasSaxpy(len, alpha, mat2->data_device, 1, mat1->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
EXPORT int add_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat1 == target) {
cublasSaxpy(len, 1, mat2->data_device, 1, mat1->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else {
kAdd<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
}
return 0;
}
EXPORT int subtract_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSubtract<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int divide_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kDivide<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise multiplication of 2 matrices */
EXPORT int mult_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMult<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int assign_scalar(cudamat* mat, float alpha) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kAssignScalar<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, alpha, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int mult_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat == target) {
cublasSscal(len, alpha, mat->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else {
kMultScalar<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, alpha, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
}
return 0;
}
EXPORT int divide_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kDivideScalar<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, alpha, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int add_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddScalar<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(mat->data_device, alpha, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT float euclid_norm(cudamat* mat, int* err_code) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device) {
*err_code = ERROR_NOT_ON_DEVICE;
return -1.;
}
float res = cublasSnrm2(len, mat->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
EXPORT float manhattan_norm(cudamat* mat, int* err_code) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device) {
*err_code = ERROR_NOT_ON_DEVICE;
return -1.;
}
float res = cublasSasum(len, mat->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
EXPORT int selectRows(cudamat* source, cudamat* target, cudamat* indices){
const int nRetRows = indices->size[1];
if (nRetRows==0) return 0;
dim3 gridDim((nRetRows+31)/32);
dim3 blockDim(32);
kSelectRows<<<gridDim, blockDim>>>(source->data_device, target->data_device, indices->data_device, nRetRows, source->size[0], source->size[1]);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
EXPORT int setSelectedRows(cudamat* target, cudamat* source, cudamat* indices){
const int nSetRows = indices->size[1];
if (nSetRows==0)
return 0;
dim3 gridDim((nSetRows+31)/32);
dim3 blockDim(32);
kSetSelectedRows<<<gridDim, blockDim>>>(target->data_device, source->data_device, indices->data_device, nSetRows, target->size[0], target->size[1]);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
EXPORT int where(cudamat* condition_mat, cudamat* if_mat, cudamat* else_mat, cudamat* target) {
unsigned int len = condition_mat->size[0] * condition_mat->size[1];
if (!condition_mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (condition_mat->size[0] != target->size[0] || condition_mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (condition_mat->size[0] != if_mat->size[0] || condition_mat->size[1] != if_mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (condition_mat->size[0] != else_mat->size[0] || condition_mat->size[1] != else_mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kWhere<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(condition_mat->data_device,
if_mat->data_device, else_mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
EXPORT int correlate(cudamat* source, cudamat* kernel, cudamat* dest) {
int len = source->size[0] * source->size[1];
if (!source->on_device || !kernel->on_device || !dest->on_device)
return ERROR_NOT_ON_DEVICE;
if (source->size[0] != dest->size[0] || source->size[1] != dest->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (kernel->size[0] % 2 == 0 || kernel->size[1] % 2 == 0 ||
kernel->size[0] > source->size[0] || kernel->size[1] > source->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kCorrelate<<<NUM_VECTOR_OP_BLOCKS(len),NUM_VECTOR_OP_THREADS_PER_BLOCK(len)>>>(source->data_device,
kernel->data_device, dest->data_device, source->size[1], source->size[0],
kernel->size[1], kernel->size[0]);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
}
|
007418aa64ce2e1450ecdd24f676b1ea520b2fbe.hip | // !!! This is a file automatically generated by hipify!!!
/* =========================================================================
Copyright (c) 2010-2015, Institute for Microelectronics,
Institute for Analysis and Scientific Computing,
TU Wien.
Portions of this software are copyright by UChicago Argonne, LLC.
-----------------
ViennaCL - The Vienna Computing Library
-----------------
Project Head: Karl Rupp [email protected]
(A list of authors and contributors can be found in the PDF manual)
License: MIT (X11), see file LICENSE in the base directory
============================================================================= */
/*
*
* Tutorial: Use ViennaCL with user-provided CUDA buffers
*
*/
//
// include necessary system headers
//
#include <iostream>
#include <cstdlib>
#include <string>
#include <hip/hip_runtime.h>
//
// ViennaCL includes
//
#include "viennacl/vector.hpp"
#include "viennacl/matrix.hpp"
#include "viennacl/linalg/matrix_operations.hpp"
#include "viennacl/linalg/norm_2.hpp"
#include "viennacl/linalg/prod.hpp"
//
// A simple CUDA kernel for the vector operation x += y
//
template<typename T>
__global__ void my_inplace_add_kernel(T * vec1, T * vec2, unsigned int size)
{
for (unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
i < size;
i += gridDim.x * blockDim.x)
vec1[i] += vec2[i];
}
int main()
{
typedef float ScalarType;
//
// Part 1: Allocate some CUDA memory
//
std::size_t size = 10;
ScalarType *cuda_x;
ScalarType *cuda_y;
hipMalloc(&cuda_x, size * sizeof(ScalarType));
hipMalloc(&cuda_y, size * sizeof(ScalarType));
// Initialize with data
std::vector<ScalarType> host_x(size, 1.0);
std::vector<ScalarType> host_y(size, 2.0);
hipMemcpy(cuda_x, &(host_x[0]), size * sizeof(ScalarType), hipMemcpyHostToDevice);
hipMemcpy(cuda_y, &(host_y[0]), size * sizeof(ScalarType), hipMemcpyHostToDevice);
// run kernel
hipLaunchKernelGGL(( my_inplace_add_kernel), dim3(128), dim3(128), 0, 0, cuda_x, cuda_y, static_cast<unsigned int>(size)); // pass the allocated size so the grid-stride loop stays in bounds
// copy result back
std::vector<ScalarType> result_cuda(size);
hipMemcpy(&(result_cuda[0]), cuda_x, size * sizeof(ScalarType), hipMemcpyDeviceToHost);
std::cout << "Result with CUDA (native): ";
for (std::size_t i=0; i<size; ++i)
std::cout << result_cuda[i] << " ";
std::cout << std::endl;
//
// Part 2: Now do the same within ViennaCL
//
// wrap the existing CUDA buffers inside ViennaCL vectors
viennacl::vector<ScalarType> vcl_vec1(cuda_x, viennacl::CUDA_MEMORY, size); // Second parameter specifies that this is CUDA memory rather than host memory
viennacl::vector<ScalarType> vcl_vec2(cuda_y, viennacl::CUDA_MEMORY, size); // Second parameter specifies that this is CUDA memory rather than host memory
// reset values to 0 and 1, respectively
vcl_vec1 = viennacl::scalar_vector<ScalarType>(size, ScalarType(1.0));
vcl_vec2 = viennacl::scalar_vector<ScalarType>(size, ScalarType(2.0));
vcl_vec1 += vcl_vec2;
std::cout << "Result with ViennaCL: " << vcl_vec1 << std::endl;
// ViennaCL does not automatically free your buffers (you're still the owner), so don't forget to clean up :-)
hipFree(cuda_x);
hipFree(cuda_y);
//
// That's it.
//
std::cout << "!!!! TUTORIAL COMPLETED SUCCESSFULLY !!!!" << std::endl;
return EXIT_SUCCESS;
}
| 007418aa64ce2e1450ecdd24f676b1ea520b2fbe.cu | /* =========================================================================
Copyright (c) 2010-2015, Institute for Microelectronics,
Institute for Analysis and Scientific Computing,
TU Wien.
Portions of this software are copyright by UChicago Argonne, LLC.
-----------------
ViennaCL - The Vienna Computing Library
-----------------
Project Head: Karl Rupp [email protected]
(A list of authors and contributors can be found in the PDF manual)
License: MIT (X11), see file LICENSE in the base directory
============================================================================= */
/*
*
* Tutorial: Use ViennaCL with user-provided CUDA buffers
*
*/
//
// include necessary system headers
//
#include <iostream>
#include <cstdlib>
#include <string>
#include <cuda.h>
//
// ViennaCL includes
//
#include "viennacl/vector.hpp"
#include "viennacl/matrix.hpp"
#include "viennacl/linalg/matrix_operations.hpp"
#include "viennacl/linalg/norm_2.hpp"
#include "viennacl/linalg/prod.hpp"
//
// A simple CUDA kernel for the vector operation x += y
//
template<typename T>
__global__ void my_inplace_add_kernel(T * vec1, T * vec2, unsigned int size)
{
for (unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
i < size;
i += gridDim.x * blockDim.x)
vec1[i] += vec2[i];
}
int main()
{
typedef float ScalarType;
//
// Part 1: Allocate some CUDA memory
//
std::size_t size = 10;
ScalarType *cuda_x;
ScalarType *cuda_y;
cudaMalloc(&cuda_x, size * sizeof(ScalarType));
cudaMalloc(&cuda_y, size * sizeof(ScalarType));
// Initialize with data
std::vector<ScalarType> host_x(size, 1.0);
std::vector<ScalarType> host_y(size, 2.0);
cudaMemcpy(cuda_x, &(host_x[0]), size * sizeof(ScalarType), cudaMemcpyHostToDevice);
cudaMemcpy(cuda_y, &(host_y[0]), size * sizeof(ScalarType), cudaMemcpyHostToDevice);
// run kernel
my_inplace_add_kernel<<<128, 128>>>(cuda_x, cuda_y, static_cast<unsigned int>(size)); // pass the allocated size so the grid-stride loop stays in bounds
// copy result back
std::vector<ScalarType> result_cuda(size);
cudaMemcpy(&(result_cuda[0]), cuda_x, size * sizeof(ScalarType), cudaMemcpyDeviceToHost);
std::cout << "Result with CUDA (native): ";
for (std::size_t i=0; i<size; ++i)
std::cout << result_cuda[i] << " ";
std::cout << std::endl;
//
// Part 2: Now do the same within ViennaCL
//
// wrap the existing CUDA buffers inside ViennaCL vectors
viennacl::vector<ScalarType> vcl_vec1(cuda_x, viennacl::CUDA_MEMORY, size); // Second parameter specifies that this is CUDA memory rather than host memory
viennacl::vector<ScalarType> vcl_vec2(cuda_y, viennacl::CUDA_MEMORY, size); // Second parameter specifies that this is CUDA memory rather than host memory
// reset values to 0 and 1, respectively
vcl_vec1 = viennacl::scalar_vector<ScalarType>(size, ScalarType(1.0));
vcl_vec2 = viennacl::scalar_vector<ScalarType>(size, ScalarType(2.0));
vcl_vec1 += vcl_vec2;
std::cout << "Result with ViennaCL: " << vcl_vec1 << std::endl;
// ViennaCL does not automatically free your buffers (you're still the owner), so don't forget to clean up :-)
cudaFree(cuda_x);
cudaFree(cuda_y);
//
// That's it.
//
std::cout << "!!!! TUTORIAL COMPLETED SUCCESSFULLY !!!!" << std::endl;
return EXIT_SUCCESS;
}
|
41550181e4e2050de56ede96d255d46faeaaa3e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cudf/column/column_device_view.cuh>
#include <cudf/scalar/scalar.hpp>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/table/table_device_view.cuh>
#include <stack>
#include <map>
#include <regex>
#include <random>
#include "interpreter_ops.cuh"
#include "parser/CalciteExpressionParsing.h"
#include "utilities/error.hpp"
#include <hiprand/hiprand_kernel.h>
namespace interops {
namespace detail {
struct allocate_device_scalar {
template <typename T, std::enable_if_t<not cudf::is_compound<T>()> * = nullptr>
rmm::device_buffer operator()(cudf::scalar & s, hipStream_t stream = 0) {
using ScalarType = cudf::scalar_type_t<T>;
using ScalarDeviceType = cudf::scalar_device_type_t<T>;
rmm::device_buffer ret(sizeof(ScalarDeviceType), stream);
auto typed_scalar_ptr = static_cast<ScalarType *>(&s);
ScalarDeviceType h_scalar{typed_scalar_ptr->type(), typed_scalar_ptr->data(), typed_scalar_ptr->validity_data()};
CUDA_TRY(hipMemcpyAsync(ret.data(), &h_scalar, sizeof(ScalarDeviceType), hipMemcpyDefault, stream));
return ret;
}
template <typename T, std::enable_if_t<std::is_same<T, cudf::string_view>::value> * = nullptr>
rmm::device_buffer operator()(cudf::scalar & s, hipStream_t stream = 0) {
using ScalarType = cudf::scalar_type_t<T>;
using ScalarDeviceType = cudf::scalar_device_type_t<T>;
rmm::device_buffer ret(sizeof(ScalarDeviceType), stream);
auto typed_scalar_ptr = static_cast<ScalarType *>(&s);
ScalarDeviceType h_scalar{typed_scalar_ptr->type(), typed_scalar_ptr->data(), typed_scalar_ptr->validity_data(), typed_scalar_ptr->size()};
CUDA_TRY(hipMemcpyAsync(ret.data(), &h_scalar, sizeof(ScalarDeviceType), hipMemcpyDefault, stream));
return ret;
}
template <typename T, std::enable_if_t<std::is_same<T, cudf::dictionary32>::value> * = nullptr>
rmm::device_buffer operator()(cudf::scalar & s, hipStream_t stream = 0) {
RAL_FAIL("Dictionary not yet supported");
return rmm::device_buffer{};
}
template <typename T, std::enable_if_t<std::is_same<T, cudf::list_view>::value> * = nullptr>
rmm::device_buffer operator()(cudf::scalar & s, hipStream_t stream = 0) {
RAL_FAIL("List not yet supported");
return rmm::device_buffer{};
}
template <typename T, std::enable_if_t<std::is_same<T, cudf::struct_view>::value> * = nullptr>
rmm::device_buffer operator()(cudf::scalar & s, hipStream_t stream = 0) {
RAL_FAIL("Struct not yet supported");
return rmm::device_buffer{};
}
};
template <int SIZE, int REGISTER_SIZE>
int calculated_shared_memory(int num_threads_per_block) {
return SIZE * num_threads_per_block * REGISTER_SIZE;
}
// TODO: we don't know if this is fast or not; we could store this in a precomputed map (a sketch follows this function)
void calculate_grid(int * min_grid_size, int * block_size, column_index_type max_output) {
if(max_output == 1) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<1, 8>, 0));
} else if(max_output == 2) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<2, 8>, 0));
} else if(max_output == 3) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<3, 8>, 0));
} else if(max_output == 4) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<4, 8>, 0));
} else if(max_output == 5) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<5, 8>, 0));
} else if(max_output == 6) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<6, 8>, 0));
} else if(max_output == 7) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<7, 8>, 0));
} else if(max_output == 8) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<8, 8>, 0));
} else if(max_output == 9) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<9, 8>, 0));
} else if(max_output == 10) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<10, 8>, 0));
} else if(max_output == 11) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<11, 8>, 0));
} else if(max_output == 12) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<12, 8>, 0));
} else if(max_output == 13) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<13, 8>, 0));
} else if(max_output == 14) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<14, 8>, 0));
} else if(max_output == 15) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<15, 8>, 0));
} else if(max_output == 16) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<16, 8>, 0));
} else if(max_output == 17) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<17, 8>, 0));
} else if(max_output == 18) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<18, 8>, 0));
} else if(max_output == 19) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<19, 8>, 0));
} else if(max_output == 20) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<20, 8>, 0));
} else if(max_output == 21) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<21, 8>, 0));
} else if(max_output == 22) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<22, 8>, 0));
} else if(max_output == 23) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<23, 8>, 0));
} else if(max_output == 24) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<24, 8>, 0));
} else if(max_output == 25) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<25, 8>, 0));
} else if(max_output == 26) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<26, 8>, 0));
} else if(max_output == 27) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<27, 8>, 0));
} else if(max_output == 28) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<28, 8>, 0));
} else if(max_output == 29) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<29, 8>, 0));
} else if(max_output == 30) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<30, 8>, 0));
} else if(max_output == 31) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<31, 8>, 0));
} else if(max_output == 32) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<32, 8>, 0));
} else if(max_output == 33) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<33, 8>, 0));
} else if(max_output == 34) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<34, 8>, 0));
} else if(max_output == 35) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<35, 8>, 0));
} else if(max_output == 36) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<36, 8>, 0));
} else if(max_output == 37) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<37, 8>, 0));
} else if(max_output == 38) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<38, 8>, 0));
} else if(max_output == 39) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<39, 8>, 0));
} else if(max_output == 40) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<40, 8>, 0));
} else if(max_output == 41) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<41, 8>, 0));
} else if(max_output == 42) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<42, 8>, 0));
} else if(max_output == 43) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<43, 8>, 0));
} else if(max_output == 44) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<44, 8>, 0));
} else if(max_output == 45) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<45, 8>, 0));
} else if(max_output == 46) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<46, 8>, 0));
} else if(max_output == 47) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<47, 8>, 0));
} else if(max_output == 48) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<48, 8>, 0));
} else if(max_output == 49) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<49, 8>, 0));
} else if(max_output == 50) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<50, 8>, 0));
} else if(max_output == 51) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<51, 8>, 0));
} else if(max_output == 52) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<52, 8>, 0));
} else if(max_output == 53) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<53, 8>, 0));
} else if(max_output == 54) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<54, 8>, 0));
} else if(max_output == 55) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<55, 8>, 0));
} else if(max_output == 56) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<56, 8>, 0));
} else if(max_output == 57) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<57, 8>, 0));
} else if(max_output == 58) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<58, 8>, 0));
} else if(max_output == 59) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<59, 8>, 0));
} else if(max_output == 60) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<60, 8>, 0));
} else if(max_output == 61) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<61, 8>, 0));
} else if(max_output == 62) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<62, 8>, 0));
} else if(max_output == 63) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<63, 8>, 0));
} else if(max_output == 64) {
CUDA_TRY(hipOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<64, 8>, 0));
}
}
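// A minimal sketch (not used by the code above) of the "store this in a precomputed map"
// idea from the TODO on calculate_grid: memoize the occupancy result per max_output so the
// occupancy query only runs once for each size. Assumptions: single-threaded callers (no
// locking), and calculate_grid_cached is a hypothetical name, not part of the original interface.
inline void calculate_grid_cached(int * min_grid_size, int * block_size, column_index_type max_output) {
	static std::map<column_index_type, std::pair<int, int>> cache;
	auto it = cache.find(max_output);
	if(it == cache.end()) {
		// First request for this size: run the occupancy query once and remember the result
		calculate_grid(min_grid_size, block_size, max_output);
		it = cache.emplace(max_output, std::make_pair(*min_grid_size, *block_size)).first;
	}
	*min_grid_size = it->second.first;
	*block_size = it->second.second;
}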
} // namespace detail
struct expr_to_plan_visitor : public ral::parser::node_visitor
{
public:
expr_to_plan_visitor(const std::map<column_index_type, column_index_type> & expr_idx_to_col_idx_map,
cudf::size_type start_processing_position,
std::vector<column_index_type> & left_inputs,
std::vector<column_index_type> & right_inputs,
std::vector<column_index_type> & outputs,
std::vector<operator_type> & operators,
std::vector<std::unique_ptr<cudf::scalar>> & left_scalars,
std::vector<std::unique_ptr<cudf::scalar>> & right_scalars)
: expr_idx_to_col_idx_map{expr_idx_to_col_idx_map},
start_processing_position{start_processing_position},
left_inputs{left_inputs},
right_inputs{right_inputs},
outputs{outputs},
operators{operators},
left_scalars{left_scalars},
right_scalars{right_scalars},
processing_space_free_(512, true)
{
std::fill_n(processing_space_free_.begin(), start_processing_position, false);
}
void visit(const ral::parser::operad_node& node) override {
column_index_type position;
if (is_literal(node.value)) {
position = SCALAR_INDEX;
} else {
position = expr_idx_to_col_idx_map.at(static_cast<const ral::parser::variable_node&>(node).index());
}
node_to_processing_position_.insert({&node, position});
}
void visit(const ral::parser::operator_node& node) override {
operator_type operation = map_to_operator_type(node.value);
operators.push_back(operation);
if(is_binary_operator(operation)) {
const ral::parser::node * left_operand = node.children[0].get();
column_index_type left_position = node_to_processing_position_.at(left_operand);
if(left_operand->type != ral::parser::node_type::LITERAL) {
if(left_position >= start_processing_position) {
processing_space_free_[left_position] = true;
}
}
const ral::parser::node * right_operand = node.children[1].get();
column_index_type right_position = node_to_processing_position_.at(right_operand);
if(right_operand->type != ral::parser::node_type::LITERAL) {
if(right_position >= start_processing_position) {
processing_space_free_[right_position] = true;
}
}
if(left_operand->type == ral::parser::node_type::LITERAL && right_operand->type == ral::parser::node_type::LITERAL) {
RAL_FAIL("Operations between literals is not supported");
} else if(left_operand->type == ral::parser::node_type::LITERAL) {
auto literal_node = static_cast<const ral::parser::literal_node*>(left_operand);
std::unique_ptr<cudf::scalar> scalar_ptr;
if (!is_null(literal_node->value)) {
scalar_ptr = get_scalar_from_string(literal_node->value, literal_node->type());
}
left_inputs.push_back(scalar_ptr ? SCALAR_INDEX : SCALAR_NULL_INDEX);
right_inputs.push_back(right_position);
left_scalars.push_back(std::move(scalar_ptr));
right_scalars.emplace_back(nullptr);
} else if(right_operand->type == ral::parser::node_type::LITERAL) {
auto literal_node = static_cast<const ral::parser::literal_node*>(right_operand);
std::unique_ptr<cudf::scalar> scalar_ptr;
if (!is_null(literal_node->value)) {
scalar_ptr = get_scalar_from_string(literal_node->value, literal_node->type());
}
left_inputs.push_back(left_position);
right_inputs.push_back(scalar_ptr ? SCALAR_INDEX : SCALAR_NULL_INDEX);
left_scalars.emplace_back(nullptr);
right_scalars.push_back(std::move(scalar_ptr));
} else {
left_inputs.push_back(left_position);
right_inputs.push_back(right_position);
left_scalars.emplace_back(nullptr);
right_scalars.emplace_back(nullptr);
}
} else if(is_unary_operator(operation)) {
const ral::parser::node * left_operand = node.children[0].get();
RAL_EXPECTS(left_operand->type != ral::parser::node_type::LITERAL, "Unary operations on literals is not supported");
column_index_type left_position = node_to_processing_position_.at(left_operand);
if(left_position >= start_processing_position) {
processing_space_free_[left_position] = true;
}
left_inputs.push_back(left_position);
right_inputs.push_back(UNARY_INDEX);
left_scalars.emplace_back(nullptr);
right_scalars.emplace_back(nullptr);
}else{
left_inputs.push_back(NULLARY_INDEX);
right_inputs.push_back(NULLARY_INDEX);
left_scalars.emplace_back(nullptr);
right_scalars.emplace_back(nullptr);
}
column_index_type position = get_first_open_position(start_processing_position);
node_to_processing_position_.insert({&node, position});
outputs.push_back(position);
}
private:
column_index_type get_first_open_position(cudf::size_type start_position) {
assert(processing_space_free_.size() <= std::numeric_limits<column_index_type>().max());
for(size_t i = start_position; i < processing_space_free_.size(); i++) {
if(processing_space_free_[i]) {
processing_space_free_[i] = false;
return static_cast<column_index_type>(i);
}
}
return -1;
}
std::vector<bool> processing_space_free_; // A place to store whether or not a processing space is occupied at any point in time
std::map<const ral::parser::node*, column_index_type> node_to_processing_position_;
const std::map<column_index_type, column_index_type> & expr_idx_to_col_idx_map;
cudf::size_type start_processing_position;
std::vector<column_index_type> & left_inputs;
std::vector<column_index_type> & right_inputs;
std::vector<column_index_type> & outputs;
std::vector<operator_type> & operators;
std::vector<std::unique_ptr<cudf::scalar>> & left_scalars;
std::vector<std::unique_ptr<cudf::scalar>> & right_scalars;
};
/**
* Creates a physical plan for the expression that can be added to the total plan
*/
void add_expression_to_interpreter_plan(const ral::parser::parse_tree & expr_tree,
const std::map<column_index_type, column_index_type> & expr_idx_to_col_idx_map,
cudf::size_type start_processing_position,
cudf::size_type final_output_position,
std::vector<column_index_type> & left_inputs,
std::vector<column_index_type> & right_inputs,
std::vector<column_index_type> & outputs,
std::vector<operator_type> & operators,
std::vector<std::unique_ptr<cudf::scalar>> & left_scalars,
std::vector<std::unique_ptr<cudf::scalar>> & right_scalars) {
expr_to_plan_visitor visitor{expr_idx_to_col_idx_map,
start_processing_position,
left_inputs,
right_inputs,
outputs,
operators,
left_scalars,
right_scalars};
expr_tree.visit(visitor);
// Update final output position
outputs.back() = final_output_position;
}
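// Illustration (hypothetical example, not from the original code): for an expression
// equivalent to $0 + $1 * 2 over a two-column table, with expr_idx_to_col_idx_map the
// identity and start_processing_position = 2, the visitor above would emit roughly:
//   operators     = { multiply, add }              (as operator_type values)
//   left_inputs   = { 1, 0 }                       (column of $1, then column of $0)
//   right_inputs  = { SCALAR_INDEX, 2 }            (the literal 2, then temporary slot 2)
//   outputs       = { 2, final_output_position }   (slot 2 is freed once add consumes it)
//   right_scalars = { scalar 2, nullptr },  left_scalars = { nullptr, nullptr }
// Deeper expressions keep reusing freed slots at or above start_processing_position.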
void perform_interpreter_operation(cudf::mutable_table_view & out_table,
const cudf::table_view & table,
const std::vector<column_index_type> & left_inputs,
const std::vector<column_index_type> & right_inputs,
const std::vector<column_index_type> & outputs,
const std::vector<column_index_type> & final_output_positions,
const std::vector<operator_type> & operators,
const std::vector<std::unique_ptr<cudf::scalar>> & left_scalars,
const std::vector<std::unique_ptr<cudf::scalar>> & right_scalars,
cudf::size_type operation_num_rows) {
using namespace detail;
hipStream_t stream = 0;
if (final_output_positions.empty()) {
return;
}
assert(!left_inputs.empty());
assert(!right_inputs.empty());
assert(!outputs.empty());
assert(!operators.empty());
auto max_left_it = std::max_element(left_inputs.begin(), left_inputs.end());
auto max_right_it = std::max_element(right_inputs.begin(), right_inputs.end());
auto max_out_it = std::max_element(outputs.begin(), outputs.end());
RAL_EXPECTS(::max(::max(*max_left_it, *max_right_it), *max_out_it) < 64, "Interops does not support plans with an input or output index greater than 63");
column_index_type max_output = *max_out_it;
size_t shared_memory_per_thread = (max_output + 1) * sizeof(int64_t);
int min_grid_size, block_size;
calculate_grid(&min_grid_size, &block_size, max_output + 1);
size_t temp_valids_in_size = min_grid_size * block_size * table.num_columns() * sizeof(cudf::bitmask_type);
size_t temp_valids_out_size = min_grid_size * block_size * final_output_positions.size() * sizeof(cudf::bitmask_type);
rmm::device_buffer temp_device_valids_in_buffer(temp_valids_in_size, stream);
rmm::device_buffer temp_device_valids_out_buffer(temp_valids_out_size, stream);
// device table views
auto device_table_view = cudf::table_device_view::create(table, stream);
auto device_out_table_view = cudf::mutable_table_device_view::create(out_table, stream);
// device scalar views
std::vector<rmm::device_buffer> left_device_scalars_ptrs;
std::vector<cudf::detail::scalar_device_view_base *> left_device_scalars_raw;
std::vector<rmm::device_buffer> right_device_scalars_ptrs;
std::vector<cudf::detail::scalar_device_view_base *> right_device_scalars_raw;
for (size_t i = 0; i < left_scalars.size(); i++) {
left_device_scalars_ptrs.push_back(left_scalars[i] ? std::move(cudf::type_dispatcher(left_scalars[i]->type(), allocate_device_scalar{}, *(left_scalars[i]))) : rmm::device_buffer{});
left_device_scalars_raw.push_back(static_cast<cudf::detail::scalar_device_view_base *>(left_device_scalars_ptrs.back().data()));
right_device_scalars_ptrs.push_back(right_scalars[i] ? std::move(cudf::type_dispatcher(right_scalars[i]->type(), allocate_device_scalar{}, *(right_scalars[i]))) : rmm::device_buffer{});
right_device_scalars_raw.push_back(static_cast<cudf::detail::scalar_device_view_base *>(right_device_scalars_ptrs.back().data()));
}
rmm::device_vector<cudf::detail::scalar_device_view_base *> left_device_scalars(left_device_scalars_raw);
rmm::device_vector<cudf::detail::scalar_device_view_base *> right_device_scalars(right_device_scalars_raw);
// device left, right and output types
size_t num_operations = left_inputs.size();
std::vector<cudf::type_id> left_input_types_vec(num_operations);
std::vector<cudf::type_id> right_input_types_vec(num_operations);
std::vector<cudf::type_id> output_types_vec(num_operations);
std::map<column_index_type, cudf::type_id> output_map_type;
for(size_t i = 0; i < num_operations; i++) {
column_index_type left_index = left_inputs[i];
column_index_type right_index = right_inputs[i];
column_index_type output_index = outputs[i];
if(left_index >= 0 && left_index < table.num_columns()) {
left_input_types_vec[i] = table.column(left_index).type().id();
} else if(left_index == SCALAR_NULL_INDEX) {
left_input_types_vec[i] = cudf::type_id::EMPTY;
} else if(left_index == SCALAR_INDEX) {
left_input_types_vec[i] = left_scalars[i]->type().id();
} else if(left_index == UNARY_INDEX) {
// not possible
assert(false);
} else {
// have to get it from the output that generated it
left_input_types_vec[i] = output_map_type[left_index];
}
if(right_index >= 0 && right_index < table.num_columns()) {
right_input_types_vec[i] = table.column(right_index).type().id();
} else if(right_index == SCALAR_NULL_INDEX) {
right_input_types_vec[i] = cudf::type_id::EMPTY;
} else if(right_index == SCALAR_INDEX) {
right_input_types_vec[i] = right_scalars[i]->type().id();
} else if(right_index == UNARY_INDEX) {
// won't be used; it's a unary operation
right_input_types_vec[i] = cudf::type_id::EMPTY;
} else {
// have to get it from the output that generated it
right_input_types_vec[i] = output_map_type[right_index];
}
if(right_index == UNARY_INDEX){
output_types_vec[i] = get_output_type(operators[i], left_input_types_vec[i]);
}else if(right_index == NULLARY_INDEX){
output_types_vec[i] = get_output_type(operators[i]);
}else{
output_types_vec[i] = get_output_type(operators[i], left_input_types_vec[i], right_input_types_vec[i]);
}
output_map_type[output_index] = output_types_vec[i];
}
rmm::device_vector<cudf::type_id> left_device_input_types(left_input_types_vec);
rmm::device_vector<cudf::type_id> right_device_input_types(right_input_types_vec);
rmm::device_vector<column_index_type> left_device_inputs(left_inputs);
rmm::device_vector<column_index_type> right_device_inputs(right_inputs);
rmm::device_vector<column_index_type> device_outputs(outputs);
rmm::device_vector<column_index_type> final_device_output_positions(final_output_positions);
rmm::device_vector<operator_type> device_operators(operators);
InterpreterFunctor op(*device_out_table_view,
*device_table_view,
static_cast<cudf::size_type>(left_device_inputs.size()),
left_device_inputs.data().get(),
right_device_inputs.data().get(),
device_outputs.data().get(),
final_device_output_positions.data().get(),
left_device_input_types.data().get(),
right_device_input_types.data().get(),
device_operators.data().get(),
left_device_scalars.data().get(),
right_device_scalars.data().get(),
temp_device_valids_in_buffer.data(),
temp_device_valids_out_buffer.data());
rmm::device_vector<hiprandState_t> states(min_grid_size * block_size);
std::random_device rd;
std::default_random_engine generator(rd());
std::uniform_int_distribution<long long unsigned> distribution(0,0xFFFFFFFFFFFFFFFF);
unsigned long long seed = distribution(generator);
hipLaunchKernelGGL(( setup_rand_kernel), dim3(min_grid_size),
dim3( block_size),
shared_memory_per_thread * block_size,
stream, states.data().get(),seed);
if (operation_num_rows == 0){
operation_num_rows = table.num_rows();
}
hipLaunchKernelGGL(( transformKernel), dim3(min_grid_size),
dim3( block_size),
shared_memory_per_thread * block_size,
stream, op, operation_num_rows, states.data().get());
CUDA_TRY(hipStreamSynchronize(stream));
}
} // namespace interops
| 41550181e4e2050de56ede96d255d46faeaaa3e4.cu | #include <cudf/column/column_device_view.cuh>
#include <cudf/scalar/scalar.hpp>
#include <cudf/scalar/scalar_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/table/table_device_view.cuh>
#include <stack>
#include <map>
#include <regex>
#include <random>
#include "interpreter_ops.cuh"
#include "parser/CalciteExpressionParsing.h"
#include "utilities/error.hpp"
#include <curand_kernel.h>
namespace interops {
namespace detail {
struct allocate_device_scalar {
template <typename T, std::enable_if_t<not cudf::is_compound<T>()> * = nullptr>
rmm::device_buffer operator()(cudf::scalar & s, cudaStream_t stream = 0) {
using ScalarType = cudf::scalar_type_t<T>;
using ScalarDeviceType = cudf::scalar_device_type_t<T>;
rmm::device_buffer ret(sizeof(ScalarDeviceType), stream);
auto typed_scalar_ptr = static_cast<ScalarType *>(&s);
ScalarDeviceType h_scalar{typed_scalar_ptr->type(), typed_scalar_ptr->data(), typed_scalar_ptr->validity_data()};
CUDA_TRY(cudaMemcpyAsync(ret.data(), &h_scalar, sizeof(ScalarDeviceType), cudaMemcpyDefault, stream));
return ret;
}
template <typename T, std::enable_if_t<std::is_same<T, cudf::string_view>::value> * = nullptr>
rmm::device_buffer operator()(cudf::scalar & s, cudaStream_t stream = 0) {
using ScalarType = cudf::scalar_type_t<T>;
using ScalarDeviceType = cudf::scalar_device_type_t<T>;
rmm::device_buffer ret(sizeof(ScalarDeviceType), stream);
auto typed_scalar_ptr = static_cast<ScalarType *>(&s);
ScalarDeviceType h_scalar{typed_scalar_ptr->type(), typed_scalar_ptr->data(), typed_scalar_ptr->validity_data(), typed_scalar_ptr->size()};
CUDA_TRY(cudaMemcpyAsync(ret.data(), &h_scalar, sizeof(ScalarDeviceType), cudaMemcpyDefault, stream));
return ret;
}
template <typename T, std::enable_if_t<std::is_same<T, cudf::dictionary32>::value> * = nullptr>
rmm::device_buffer operator()(cudf::scalar & s, cudaStream_t stream = 0) {
RAL_FAIL("Dictionary not yet supported");
return rmm::device_buffer{};
}
template <typename T, std::enable_if_t<std::is_same<T, cudf::list_view>::value> * = nullptr>
rmm::device_buffer operator()(cudf::scalar & s, cudaStream_t stream = 0) {
RAL_FAIL("List not yet supported");
return rmm::device_buffer{};
}
template <typename T, std::enable_if_t<std::is_same<T, cudf::struct_view>::value> * = nullptr>
rmm::device_buffer operator()(cudf::scalar & s, cudaStream_t stream = 0) {
RAL_FAIL("Struct not yet supported");
return rmm::device_buffer{};
}
};
template <int SIZE, int REGISTER_SIZE>
int calculated_shared_memory(int num_threads_per_block) {
return SIZE * num_threads_per_block * REGISTER_SIZE;
}
// TODO: we don't know if this is fast or not; we could store this in a precomputed map (a sketch follows this function)
void calculate_grid(int * min_grid_size, int * block_size, column_index_type max_output) {
if(max_output == 1) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<1, 8>, 0));
} else if(max_output == 2) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<2, 8>, 0));
} else if(max_output == 3) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<3, 8>, 0));
} else if(max_output == 4) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<4, 8>, 0));
} else if(max_output == 5) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<5, 8>, 0));
} else if(max_output == 6) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<6, 8>, 0));
} else if(max_output == 7) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<7, 8>, 0));
} else if(max_output == 8) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<8, 8>, 0));
} else if(max_output == 9) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<9, 8>, 0));
} else if(max_output == 10) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<10, 8>, 0));
} else if(max_output == 11) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<11, 8>, 0));
} else if(max_output == 12) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<12, 8>, 0));
} else if(max_output == 13) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<13, 8>, 0));
} else if(max_output == 14) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<14, 8>, 0));
} else if(max_output == 15) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<15, 8>, 0));
} else if(max_output == 16) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<16, 8>, 0));
} else if(max_output == 17) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<17, 8>, 0));
} else if(max_output == 18) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<18, 8>, 0));
} else if(max_output == 19) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<19, 8>, 0));
} else if(max_output == 20) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<20, 8>, 0));
} else if(max_output == 21) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<21, 8>, 0));
} else if(max_output == 22) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<22, 8>, 0));
} else if(max_output == 23) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<23, 8>, 0));
} else if(max_output == 24) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<24, 8>, 0));
} else if(max_output == 25) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<25, 8>, 0));
} else if(max_output == 26) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<26, 8>, 0));
} else if(max_output == 27) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<27, 8>, 0));
} else if(max_output == 28) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<28, 8>, 0));
} else if(max_output == 29) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<29, 8>, 0));
} else if(max_output == 30) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<30, 8>, 0));
} else if(max_output == 31) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<31, 8>, 0));
} else if(max_output == 32) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<32, 8>, 0));
} else if(max_output == 33) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<33, 8>, 0));
} else if(max_output == 34) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<34, 8>, 0));
} else if(max_output == 35) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<35, 8>, 0));
} else if(max_output == 36) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<36, 8>, 0));
} else if(max_output == 37) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<37, 8>, 0));
} else if(max_output == 38) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<38, 8>, 0));
} else if(max_output == 39) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<39, 8>, 0));
} else if(max_output == 40) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<40, 8>, 0));
} else if(max_output == 41) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<41, 8>, 0));
} else if(max_output == 42) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<42, 8>, 0));
} else if(max_output == 43) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<43, 8>, 0));
} else if(max_output == 44) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<44, 8>, 0));
} else if(max_output == 45) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<45, 8>, 0));
} else if(max_output == 46) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<46, 8>, 0));
} else if(max_output == 47) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<47, 8>, 0));
} else if(max_output == 48) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<48, 8>, 0));
} else if(max_output == 49) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<49, 8>, 0));
} else if(max_output == 50) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<50, 8>, 0));
} else if(max_output == 51) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<51, 8>, 0));
} else if(max_output == 52) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<52, 8>, 0));
} else if(max_output == 53) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<53, 8>, 0));
} else if(max_output == 54) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<54, 8>, 0));
} else if(max_output == 55) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<55, 8>, 0));
} else if(max_output == 56) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<56, 8>, 0));
} else if(max_output == 57) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<57, 8>, 0));
} else if(max_output == 58) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<58, 8>, 0));
} else if(max_output == 59) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<59, 8>, 0));
} else if(max_output == 60) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<60, 8>, 0));
} else if(max_output == 61) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<61, 8>, 0));
} else if(max_output == 62) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<62, 8>, 0));
} else if(max_output == 63) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<63, 8>, 0));
} else if(max_output == 64) {
CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
min_grid_size, block_size, transformKernel, calculated_shared_memory<64, 8>, 0));
}
}
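// A minimal sketch (not used by the code above) of another way to realize the TODO on
// calculate_grid: replace the if/else chain with a table of occupancy callbacks built at
// compile time. Assumptions: <utility> is available for std::integer_sequence, and
// transformKernel / calculated_shared_memory<SIZE, 8> keep the signatures used above;
// occupancy_for, occupancy_table and calculate_grid_from_table are hypothetical names.
using occupancy_fn = cudaError_t (*)(int *, int *);
template <int SIZE>
cudaError_t occupancy_for(int * min_grid_size, int * block_size) {
	return cudaOccupancyMaxPotentialBlockSizeVariableSMem(
		min_grid_size, block_size, transformKernel, calculated_shared_memory<SIZE, 8>, 0);
}
template <int... SIZES>
const occupancy_fn * occupancy_table(std::integer_sequence<int, SIZES...>) {
	// One entry per supported max_output (1..64), in order
	static const occupancy_fn table[] = {&occupancy_for<SIZES + 1>...};
	return table;
}
inline void calculate_grid_from_table(int * min_grid_size, int * block_size, column_index_type max_output) {
	static const occupancy_fn * table = occupancy_table(std::make_integer_sequence<int, 64>{});
	if(max_output >= 1 && max_output <= 64) {
		CUDA_TRY(table[max_output - 1](min_grid_size, block_size));
	}
}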
} // namespace detail
struct expr_to_plan_visitor : public ral::parser::node_visitor
{
public:
expr_to_plan_visitor(const std::map<column_index_type, column_index_type> & expr_idx_to_col_idx_map,
cudf::size_type start_processing_position,
std::vector<column_index_type> & left_inputs,
std::vector<column_index_type> & right_inputs,
std::vector<column_index_type> & outputs,
std::vector<operator_type> & operators,
std::vector<std::unique_ptr<cudf::scalar>> & left_scalars,
std::vector<std::unique_ptr<cudf::scalar>> & right_scalars)
: expr_idx_to_col_idx_map{expr_idx_to_col_idx_map},
start_processing_position{start_processing_position},
left_inputs{left_inputs},
right_inputs{right_inputs},
outputs{outputs},
operators{operators},
left_scalars{left_scalars},
right_scalars{right_scalars},
processing_space_free_(512, true)
{
std::fill_n(processing_space_free_.begin(), start_processing_position, false);
}
void visit(const ral::parser::operad_node& node) override {
column_index_type position;
if (is_literal(node.value)) {
position = SCALAR_INDEX;
} else {
position = expr_idx_to_col_idx_map.at(static_cast<const ral::parser::variable_node&>(node).index());
}
node_to_processing_position_.insert({&node, position});
}
void visit(const ral::parser::operator_node& node) override {
operator_type operation = map_to_operator_type(node.value);
operators.push_back(operation);
if(is_binary_operator(operation)) {
const ral::parser::node * left_operand = node.children[0].get();
column_index_type left_position = node_to_processing_position_.at(left_operand);
if(left_operand->type != ral::parser::node_type::LITERAL) {
if(left_position >= start_processing_position) {
processing_space_free_[left_position] = true;
}
}
const ral::parser::node * right_operand = node.children[1].get();
column_index_type right_position = node_to_processing_position_.at(right_operand);
if(right_operand->type != ral::parser::node_type::LITERAL) {
if(right_position >= start_processing_position) {
processing_space_free_[right_position] = true;
}
}
if(left_operand->type == ral::parser::node_type::LITERAL && right_operand->type == ral::parser::node_type::LITERAL) {
RAL_FAIL("Operations between literals is not supported");
} else if(left_operand->type == ral::parser::node_type::LITERAL) {
auto literal_node = static_cast<const ral::parser::literal_node*>(left_operand);
std::unique_ptr<cudf::scalar> scalar_ptr;
if (!is_null(literal_node->value)) {
scalar_ptr = get_scalar_from_string(literal_node->value, literal_node->type());
}
left_inputs.push_back(scalar_ptr ? SCALAR_INDEX : SCALAR_NULL_INDEX);
right_inputs.push_back(right_position);
left_scalars.push_back(std::move(scalar_ptr));
right_scalars.emplace_back(nullptr);
} else if(right_operand->type == ral::parser::node_type::LITERAL) {
auto literal_node = static_cast<const ral::parser::literal_node*>(right_operand);
std::unique_ptr<cudf::scalar> scalar_ptr;
if (!is_null(literal_node->value)) {
scalar_ptr = get_scalar_from_string(literal_node->value, literal_node->type());
}
left_inputs.push_back(left_position);
right_inputs.push_back(scalar_ptr ? SCALAR_INDEX : SCALAR_NULL_INDEX);
left_scalars.emplace_back(nullptr);
right_scalars.push_back(std::move(scalar_ptr));
} else {
left_inputs.push_back(left_position);
right_inputs.push_back(right_position);
left_scalars.emplace_back(nullptr);
right_scalars.emplace_back(nullptr);
}
} else if(is_unary_operator(operation)) {
const ral::parser::node * left_operand = node.children[0].get();
RAL_EXPECTS(left_operand->type != ral::parser::node_type::LITERAL, "Unary operations on literals is not supported");
column_index_type left_position = node_to_processing_position_.at(left_operand);
if(left_position >= start_processing_position) {
processing_space_free_[left_position] = true;
}
left_inputs.push_back(left_position);
right_inputs.push_back(UNARY_INDEX);
left_scalars.emplace_back(nullptr);
right_scalars.emplace_back(nullptr);
}else{
left_inputs.push_back(NULLARY_INDEX);
right_inputs.push_back(NULLARY_INDEX);
left_scalars.emplace_back(nullptr);
right_scalars.emplace_back(nullptr);
}
column_index_type position = get_first_open_position(start_processing_position);
node_to_processing_position_.insert({&node, position});
outputs.push_back(position);
}
private:
column_index_type get_first_open_position(cudf::size_type start_position) {
assert(processing_space_free_.size() <= std::numeric_limits<column_index_type>().max());
for(size_t i = start_position; i < processing_space_free_.size(); i++) {
if(processing_space_free_[i]) {
processing_space_free_[i] = false;
return static_cast<column_index_type>(i);
}
}
return -1;
}
std::vector<bool> processing_space_free_; // A place to store whether or not a processing space is occupied at any point in time
std::map<const ral::parser::node*, column_index_type> node_to_processing_position_;
const std::map<column_index_type, column_index_type> & expr_idx_to_col_idx_map;
cudf::size_type start_processing_position;
std::vector<column_index_type> & left_inputs;
std::vector<column_index_type> & right_inputs;
std::vector<column_index_type> & outputs;
std::vector<operator_type> & operators;
std::vector<std::unique_ptr<cudf::scalar>> & left_scalars;
std::vector<std::unique_ptr<cudf::scalar>> & right_scalars;
};
/**
* Creates a physical plan for the expression that can be added to the total plan
*/
void add_expression_to_interpreter_plan(const ral::parser::parse_tree & expr_tree,
const std::map<column_index_type, column_index_type> & expr_idx_to_col_idx_map,
cudf::size_type start_processing_position,
cudf::size_type final_output_position,
std::vector<column_index_type> & left_inputs,
std::vector<column_index_type> & right_inputs,
std::vector<column_index_type> & outputs,
std::vector<operator_type> & operators,
std::vector<std::unique_ptr<cudf::scalar>> & left_scalars,
std::vector<std::unique_ptr<cudf::scalar>> & right_scalars) {
expr_to_plan_visitor visitor{expr_idx_to_col_idx_map,
start_processing_position,
left_inputs,
right_inputs,
outputs,
operators,
left_scalars,
right_scalars};
expr_tree.visit(visitor);
// Update final output position
outputs.back() = final_output_position;
}
void perform_interpreter_operation(cudf::mutable_table_view & out_table,
const cudf::table_view & table,
const std::vector<column_index_type> & left_inputs,
const std::vector<column_index_type> & right_inputs,
const std::vector<column_index_type> & outputs,
const std::vector<column_index_type> & final_output_positions,
const std::vector<operator_type> & operators,
const std::vector<std::unique_ptr<cudf::scalar>> & left_scalars,
const std::vector<std::unique_ptr<cudf::scalar>> & right_scalars,
cudf::size_type operation_num_rows) {
using namespace detail;
cudaStream_t stream = 0;
if (final_output_positions.empty()) {
return;
}
assert(!left_inputs.empty());
assert(!right_inputs.empty());
assert(!outputs.empty());
assert(!operators.empty());
auto max_left_it = std::max_element(left_inputs.begin(), left_inputs.end());
auto max_right_it = std::max_element(right_inputs.begin(), right_inputs.end());
auto max_out_it = std::max_element(outputs.begin(), outputs.end());
RAL_EXPECTS(std::max(std::max(*max_left_it, *max_right_it), *max_out_it) < 64, "Interops does not support plans with an input or output index greater than 63");
column_index_type max_output = *max_out_it;
size_t shared_memory_per_thread = (max_output + 1) * sizeof(int64_t);
int min_grid_size, block_size;
calculate_grid(&min_grid_size, &block_size, max_output + 1);
size_t temp_valids_in_size = min_grid_size * block_size * table.num_columns() * sizeof(cudf::bitmask_type);
size_t temp_valids_out_size = min_grid_size * block_size * final_output_positions.size() * sizeof(cudf::bitmask_type);
rmm::device_buffer temp_device_valids_in_buffer(temp_valids_in_size, stream);
rmm::device_buffer temp_device_valids_out_buffer(temp_valids_out_size, stream);
// device table views
auto device_table_view = cudf::table_device_view::create(table, stream);
auto device_out_table_view = cudf::mutable_table_device_view::create(out_table, stream);
// device scalar views
std::vector<rmm::device_buffer> left_device_scalars_ptrs;
std::vector<cudf::detail::scalar_device_view_base *> left_device_scalars_raw;
std::vector<rmm::device_buffer> right_device_scalars_ptrs;
std::vector<cudf::detail::scalar_device_view_base *> right_device_scalars_raw;
for (size_t i = 0; i < left_scalars.size(); i++) {
left_device_scalars_ptrs.push_back(left_scalars[i] ? std::move(cudf::type_dispatcher(left_scalars[i]->type(), allocate_device_scalar{}, *(left_scalars[i]))) : rmm::device_buffer{});
left_device_scalars_raw.push_back(static_cast<cudf::detail::scalar_device_view_base *>(left_device_scalars_ptrs.back().data()));
right_device_scalars_ptrs.push_back(right_scalars[i] ? std::move(cudf::type_dispatcher(right_scalars[i]->type(), allocate_device_scalar{}, *(right_scalars[i]))) : rmm::device_buffer{});
right_device_scalars_raw.push_back(static_cast<cudf::detail::scalar_device_view_base *>(right_device_scalars_ptrs.back().data()));
}
rmm::device_vector<cudf::detail::scalar_device_view_base *> left_device_scalars(left_device_scalars_raw);
rmm::device_vector<cudf::detail::scalar_device_view_base *> right_device_scalars(right_device_scalars_raw);
// device left, right and output types
size_t num_operations = left_inputs.size();
std::vector<cudf::type_id> left_input_types_vec(num_operations);
std::vector<cudf::type_id> right_input_types_vec(num_operations);
std::vector<cudf::type_id> output_types_vec(num_operations);
std::map<column_index_type, cudf::type_id> output_map_type;
for(size_t i = 0; i < num_operations; i++) {
column_index_type left_index = left_inputs[i];
column_index_type right_index = right_inputs[i];
column_index_type output_index = outputs[i];
if(left_index >= 0 && left_index < table.num_columns()) {
left_input_types_vec[i] = table.column(left_index).type().id();
} else if(left_index == SCALAR_NULL_INDEX) {
left_input_types_vec[i] = cudf::type_id::EMPTY;
} else if(left_index == SCALAR_INDEX) {
left_input_types_vec[i] = left_scalars[i]->type().id();
} else if(left_index == UNARY_INDEX) {
// not possible
assert(false);
} else {
// have to get it from the output that generated it
left_input_types_vec[i] = output_map_type[left_index];
}
if(right_index >= 0 && right_index < table.num_columns()) {
right_input_types_vec[i] = table.column(right_index).type().id();
} else if(right_index == SCALAR_NULL_INDEX) {
right_input_types_vec[i] = cudf::type_id::EMPTY;
} else if(right_index == SCALAR_INDEX) {
right_input_types_vec[i] = right_scalars[i]->type().id();
} else if(right_index == UNARY_INDEX) {
			// won't be used; it's a unary operation
right_input_types_vec[i] = cudf::type_id::EMPTY;
} else {
// have to get it from the output that generated it
right_input_types_vec[i] = output_map_type[right_index];
}
if(right_index == UNARY_INDEX){
output_types_vec[i] = get_output_type(operators[i], left_input_types_vec[i]);
}else if(right_index == NULLARY_INDEX){
output_types_vec[i] = get_output_type(operators[i]);
}else{
output_types_vec[i] = get_output_type(operators[i], left_input_types_vec[i], right_input_types_vec[i]);
}
output_map_type[output_index] = output_types_vec[i];
}
rmm::device_vector<cudf::type_id> left_device_input_types(left_input_types_vec);
rmm::device_vector<cudf::type_id> right_device_input_types(right_input_types_vec);
rmm::device_vector<column_index_type> left_device_inputs(left_inputs);
rmm::device_vector<column_index_type> right_device_inputs(right_inputs);
rmm::device_vector<column_index_type> device_outputs(outputs);
rmm::device_vector<column_index_type> final_device_output_positions(final_output_positions);
rmm::device_vector<operator_type> device_operators(operators);
InterpreterFunctor op(*device_out_table_view,
*device_table_view,
static_cast<cudf::size_type>(left_device_inputs.size()),
left_device_inputs.data().get(),
right_device_inputs.data().get(),
device_outputs.data().get(),
final_device_output_positions.data().get(),
left_device_input_types.data().get(),
right_device_input_types.data().get(),
device_operators.data().get(),
left_device_scalars.data().get(),
right_device_scalars.data().get(),
temp_device_valids_in_buffer.data(),
temp_device_valids_out_buffer.data());
rmm::device_vector<curandState> states(min_grid_size * block_size);
std::random_device rd;
std::default_random_engine generator(rd());
std::uniform_int_distribution<long long unsigned> distribution(0,0xFFFFFFFFFFFFFFFF);
unsigned long long seed = distribution(generator);
setup_rand_kernel<<<min_grid_size,
block_size,
shared_memory_per_thread * block_size,
stream>>>(states.data().get(),seed);
if (operation_num_rows == 0){
operation_num_rows = table.num_rows();
}
transformKernel<<<min_grid_size,
block_size,
shared_memory_per_thread * block_size,
stream>>>(op, operation_num_rows, states.data().get());
CUDA_TRY(cudaStreamSynchronize(stream));
}
} // namespace interops
|
0d817c78a044b9bc7bf55823eff55182b23001ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*#include<iostream>
#include<cuda.h>
#include<cuda_runtime.h>
#include "device_launch_parameters.h"
#include <hip/device_functions.h>
#define imin(a,b)((a<b)?a:b)
const int N =33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32, (N+ threadsPerBlock-1) / threadsPerBlock);
__global__ void kernel(float*a,float*b,float*c)
{
	__shared__ float cache[threadsPerBlock];//shared between the threads of a block; with 32 blocks in this example there are 32 instances of cache, each of size 256*sizeof(float) bytes.
float temp = 0;
int tid = threadIdx.x + blockIdx.x*blockDim.x;
int cacheIndex = threadIdx.x;
while (tid < N)
{
temp += a[tid]*b[tid];
tid += gridDim.x*blockDim.x;
}
cache[cacheIndex] = temp;
__syncthreads();
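	//tree reduction in shared memory: each pass halves the number of active threads until cache[0] holds the block's partial sum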
int i = blockDim.x / 2;
while (i!=0)
{
if(cacheIndex<i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
if (cacheIndex == 0)
{
c[blockIdx.x] = cache[0];
		//each of the 32 blocks writes its partial sum (cache[0]) to c[blockIdx.x]
printf("blockIdx.x= %d ,threadIdx.x=%d , cache=%f \n", blockIdx.x, threadIdx.x, cache[0]);
}
}
int main()
{
float* h_a = new float[N];
float* h_b = new float[N];
float* h_c = new float[blocksPerGrid];
float* d_a;
float* d_b;
float* d_c;
hipMalloc((void**)&d_a, sizeof(float)*N);
hipMalloc((void**)&d_b, sizeof(float)*N);
hipMalloc((void**)&d_c, sizeof(float)*blocksPerGrid);
for (int i = 0; i < N; i++)
{
h_a[i] = i;
h_b[i] = 2 * i;
}
hipMemcpy(d_a, h_a, sizeof(float)*N, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, sizeof(float)*N, hipMemcpyHostToDevice);
kernel << <blocksPerGrid, threadsPerBlock >> >(d_a,d_b,d_c);
hipMemcpy(h_c, d_c, sizeof(float)*blocksPerGrid, hipMemcpyDeviceToHost);
//Output h_c
	//The final sum over the per-block partial results is done on the CPU, which is better than wasting GPU resources on it
float c = 0;
for (int i = 0; i < blocksPerGrid; i++)
{
c += h_c[i];
}
	//CPU closed-form sum for verification and comparison
#define sumSquares(x) (x*(x+1)*(2*x+1)/6)
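	//a[i]*b[i] = 2*i*i, so the dot product should equal 2 * (sum of i^2 for i = 0..N-1) = 2*sumSquares(N-1)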
printf("Does GPU value %.6g == %.6g\n", c,2*sumSquares((float) (N-1)));
	//Free all device and host allocations
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
delete [] h_a;
delete [] h_b;
delete[] h_c;
return 911;
}*/ | 0d817c78a044b9bc7bf55823eff55182b23001ba.cu | /*#include<iostream>
#include<cuda.h>
#include<cuda_runtime.h>
#include "device_launch_parameters.h"
#include <device_functions.h>
#define imin(a,b)((a<b)?a:b)
const int N =33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32, (N+ threadsPerBlock-1) / threadsPerBlock);
__global__ void kernel(float*a,float*b,float*c)
{
	__shared__ float cache[threadsPerBlock];//shared between the threads of a block; with 32 blocks in this example there are 32 instances of cache, each of size 256*sizeof(float) bytes.
float temp = 0;
int tid = threadIdx.x + blockIdx.x*blockDim.x;
int cacheIndex = threadIdx.x;
while (tid < N)
{
temp += a[tid]*b[tid];
tid += gridDim.x*blockDim.x;
}
cache[cacheIndex] = temp;
__syncthreads();
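	//tree reduction in shared memory: each pass halves the number of active threads until cache[0] holds the block's partial sum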
int i = blockDim.x / 2;
while (i!=0)
{
if(cacheIndex<i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
if (cacheIndex == 0)
{
c[blockIdx.x] = cache[0];
		//each of the 32 blocks writes its partial sum (cache[0]) to c[blockIdx.x]
printf("blockIdx.x= %d ,threadIdx.x=%d , cache=%f \n", blockIdx.x, threadIdx.x, cache[0]);
}
}
int main()
{
float* h_a = new float[N];
float* h_b = new float[N];
float* h_c = new float[blocksPerGrid];
float* d_a;
float* d_b;
float* d_c;
cudaMalloc((void**)&d_a, sizeof(float)*N);
cudaMalloc((void**)&d_b, sizeof(float)*N);
cudaMalloc((void**)&d_c, sizeof(float)*blocksPerGrid);
for (int i = 0; i < N; i++)
{
h_a[i] = i;
h_b[i] = 2 * i;
}
cudaMemcpy(d_a, h_a, sizeof(float)*N, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, sizeof(float)*N, cudaMemcpyHostToDevice);
kernel << <blocksPerGrid, threadsPerBlock >> >(d_a,d_b,d_c);
cudaMemcpy(h_c, d_c, sizeof(float)*blocksPerGrid, cudaMemcpyDeviceToHost);
//Output h_c
	//The final sum over the per-block partial results is done on the CPU, which is better than wasting GPU resources on it
float c = 0;
for (int i = 0; i < blocksPerGrid; i++)
{
c += h_c[i];
}
	//CPU closed-form sum for verification and comparison
#define sumSquares(x) (x*(x+1)*(2*x+1)/6)
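	//a[i]*b[i] = 2*i*i, so the dot product should equal 2 * (sum of i^2 for i = 0..N-1) = 2*sumSquares(N-1)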
printf("Does GPU value %.6g == %.6g\n", c,2*sumSquares((float) (N-1)));
	//Free all device and host allocations
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
delete [] h_a;
delete [] h_b;
delete[] h_c;
return 911;
}*/ |
a1e04fed298abea1954657588576b185ddd577bd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "initialize_cells.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
CellT *dev_cells = NULL;
hipMalloc(&dev_cells, XSIZE*YSIZE);
CellT *dev_next_cells = NULL;
hipMalloc(&dev_next_cells, XSIZE*YSIZE);
int size_x = XSIZE*YSIZE;
int size_y = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((initialize_cells), dim3(gridBlock), dim3(threadBlock), 0, 0, dev_cells, dev_next_cells, size_x, size_y);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((initialize_cells), dim3(gridBlock), dim3(threadBlock), 0, 0, dev_cells, dev_next_cells, size_x, size_y);
}
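// the 10-iteration loop above is a warm-up; the next 1000 launches form the timed region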
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((initialize_cells), dim3(gridBlock), dim3(threadBlock), 0, 0, dev_cells, dev_next_cells, size_x, size_y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a1e04fed298abea1954657588576b185ddd577bd.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "initialize_cells.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
CellT *dev_cells = NULL;
cudaMalloc(&dev_cells, XSIZE*YSIZE);
CellT *dev_next_cells = NULL;
cudaMalloc(&dev_next_cells, XSIZE*YSIZE);
int size_x = XSIZE*YSIZE;
int size_y = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
initialize_cells<<<gridBlock,threadBlock>>>(dev_cells,dev_next_cells,size_x,size_y);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
initialize_cells<<<gridBlock,threadBlock>>>(dev_cells,dev_next_cells,size_x,size_y);
}
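// the 10-iteration loop above is a warm-up; the next 1000 launches form the timed region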
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
initialize_cells<<<gridBlock,threadBlock>>>(dev_cells,dev_next_cells,size_x,size_y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
7c5bf93b123a01f3cc484a803f26957775fe2cd6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// DO NOT MODIFY THIS FILE
#include <ctime>
#include <cstdio>
#include <cstdint>
#include <chrono>
#include "cuda_utils.h"
#include "apsp.h"
namespace
{
constexpr int DATA_RANGE = 10000;
constexpr int TIMER_ROUNDS = 3;
constexpr int TIMER_WARMUP = 1;
__global__ void __launch_bounds__(1024) genDataKernel(int n, int seed, int *data)
{
int64_t x = seed;
x = (x * 179424673 + blockIdx.x + 275604541) % DATA_RANGE;
x = (x * 373587883 + threadIdx.x + 472882027) % DATA_RANGE;
x = (x * 179424673 + blockIdx.y + 275604541) % DATA_RANGE;
x = (x * 373587883 + threadIdx.y + 472882027) % DATA_RANGE;
auto i = blockIdx.y * 32 + threadIdx.y;
auto j = blockIdx.x * 32 + threadIdx.x;
if (i < n && j < n)
{
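        // zero on the diagonal (distance from a vertex to itself), pseudo-random edge weight otherwise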
data[i * n + j] = i == j ? 0 : x;
}
}
__global__ void __launch_bounds__(1024) compareKernel(int n, int *data1, int *data2, int *diff)
{
auto i = blockIdx.x * 1024 + threadIdx.x;
if (i < n)
{
if (data1[i] != data2[i])
{
atomicAdd(diff, 1);
}
}
}
/* device */ int *allocGraph(int n)
{
int *data;
CHK_CUDA_ERR(hipMalloc(&data, n * n * sizeof(int)));
return data;
}
void copyGraph(int n, /* device */ int *dst, /* device */ const int *src)
{
CHK_CUDA_ERR(hipMemcpy(dst, src, n * n * sizeof(int), hipMemcpyDefault));
}
/* device */ int *genData(int n)
{
int *data = allocGraph(n);
dim3 thr(32, 32);
dim3 blk((n - 1) / 32 + 1, (n - 1) / 32 + 1);
hipLaunchKernelGGL(( genDataKernel), dim3(blk), dim3(thr), 0, 0, n, time(0), data);
CHK_CUDA_ERR(hipDeviceSynchronize());
return data;
}
} // Anonymous namespace
int main(int argc, char **argv)
{
int n;
if (argc != 2 || sscanf(argv[1], "%d", &n) != 1)
{
printf("Usage: %s <graph_size>", argv[0]);
exit(-1);
}
int *data = genData(n);
int *result = allocGraph(n);
for (int i = 0; i < TIMER_WARMUP; i++)
{
copyGraph(n, result, data);
apsp(n, result);
CHK_CUDA_ERR(hipDeviceSynchronize());
}
double t = 0;
for (int i = 0; i < TIMER_ROUNDS; i++)
{
namespace ch = std::chrono;
copyGraph(n, result, data);
auto beg = ch::high_resolution_clock::now();
apsp(n, result);
auto err = hipDeviceSynchronize();
auto end = ch::high_resolution_clock::now();
if (err != hipSuccess)
{
fprintf(stderr, "CUDA Error\n");
exit(-1);
}
t += ch::duration_cast<ch::duration<double>>(end - beg).count() * 1000; // ms
}
t /= TIMER_ROUNDS;
int *ref = allocGraph(n);
copyGraph(n, ref, data);
apspRef(n, ref);
CHK_CUDA_ERR(hipDeviceSynchronize());
int *diff, diffHost = 0;
CHK_CUDA_ERR(hipMalloc(&diff, sizeof(int)));
CHK_CUDA_ERR(hipMemcpy(diff, &diffHost, sizeof(int), hipMemcpyDefault));
hipLaunchKernelGGL(( compareKernel), dim3((n * n - 1) / 1024 + 1), dim3(1024), 0, 0, n * n, result, ref, diff);
CHK_CUDA_ERR(hipMemcpy(&diffHost, diff, sizeof(int), hipMemcpyDefault));
if (diffHost == 0)
{
printf("Validation Passed\n");
}
else
{
printf("WRONG ANSWER!!!\nDifference = %d\n", diffHost);
}
printf("Time: %f ms\n", t);
}
| 7c5bf93b123a01f3cc484a803f26957775fe2cd6.cu | // DO NOT MODIFY THIS FILE
#include <ctime>
#include <cstdio>
#include <cstdint>
#include <chrono>
#include "cuda_utils.h"
#include "apsp.h"
namespace
{
constexpr int DATA_RANGE = 10000;
constexpr int TIMER_ROUNDS = 3;
constexpr int TIMER_WARMUP = 1;
__global__ void __launch_bounds__(1024) genDataKernel(int n, int seed, int *data)
{
int64_t x = seed;
x = (x * 179424673 + blockIdx.x + 275604541) % DATA_RANGE;
x = (x * 373587883 + threadIdx.x + 472882027) % DATA_RANGE;
x = (x * 179424673 + blockIdx.y + 275604541) % DATA_RANGE;
x = (x * 373587883 + threadIdx.y + 472882027) % DATA_RANGE;
auto i = blockIdx.y * 32 + threadIdx.y;
auto j = blockIdx.x * 32 + threadIdx.x;
if (i < n && j < n)
{
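        // zero on the diagonal (distance from a vertex to itself), pseudo-random edge weight otherwise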
data[i * n + j] = i == j ? 0 : x;
}
}
__global__ void __launch_bounds__(1024) compareKernel(int n, int *data1, int *data2, int *diff)
{
auto i = blockIdx.x * 1024 + threadIdx.x;
if (i < n)
{
if (data1[i] != data2[i])
{
atomicAdd(diff, 1);
}
}
}
/* device */ int *allocGraph(int n)
{
int *data;
CHK_CUDA_ERR(cudaMalloc(&data, n * n * sizeof(int)));
return data;
}
void copyGraph(int n, /* device */ int *dst, /* device */ const int *src)
{
CHK_CUDA_ERR(cudaMemcpy(dst, src, n * n * sizeof(int), cudaMemcpyDefault));
}
/* device */ int *genData(int n)
{
int *data = allocGraph(n);
dim3 thr(32, 32);
dim3 blk((n - 1) / 32 + 1, (n - 1) / 32 + 1);
genDataKernel<<<blk, thr>>>(n, time(0), data);
CHK_CUDA_ERR(cudaDeviceSynchronize());
return data;
}
} // Anonymous namespace
int main(int argc, char **argv)
{
int n;
if (argc != 2 || sscanf(argv[1], "%d", &n) != 1)
{
printf("Usage: %s <graph_size>", argv[0]);
exit(-1);
}
int *data = genData(n);
int *result = allocGraph(n);
for (int i = 0; i < TIMER_WARMUP; i++)
{
copyGraph(n, result, data);
apsp(n, result);
CHK_CUDA_ERR(cudaDeviceSynchronize());
}
double t = 0;
for (int i = 0; i < TIMER_ROUNDS; i++)
{
namespace ch = std::chrono;
copyGraph(n, result, data);
auto beg = ch::high_resolution_clock::now();
apsp(n, result);
auto err = cudaDeviceSynchronize();
auto end = ch::high_resolution_clock::now();
if (err != cudaSuccess)
{
fprintf(stderr, "CUDA Error\n");
exit(-1);
}
t += ch::duration_cast<ch::duration<double>>(end - beg).count() * 1000; // ms
}
t /= TIMER_ROUNDS;
int *ref = allocGraph(n);
copyGraph(n, ref, data);
apspRef(n, ref);
CHK_CUDA_ERR(cudaDeviceSynchronize());
int *diff, diffHost = 0;
CHK_CUDA_ERR(cudaMalloc(&diff, sizeof(int)));
CHK_CUDA_ERR(cudaMemcpy(diff, &diffHost, sizeof(int), cudaMemcpyDefault));
compareKernel<<<(n * n - 1) / 1024 + 1, 1024>>>(n * n, result, ref, diff);
CHK_CUDA_ERR(cudaMemcpy(&diffHost, diff, sizeof(int), cudaMemcpyDefault));
if (diffHost == 0)
{
printf("Validation Passed\n");
}
else
{
printf("WRONG ANSWER!!!\nDifference = %d\n", diffHost);
}
printf("Time: %f ms\n", t);
}
|
602495d6b7874f5bf3600aadf4dccaed5ff385b3.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2020 - 2021 MONAI Consortium
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <torch/extension.h>
#include "filtering/permutohedral/permutohedral.h"
#include "utils/meta_macros.h"
#include "utils/tensor_description.h"
__constant__ int cBatchStride;
__constant__ int cChannelStride;
__constant__ int cSpatialStrides[3];
__constant__ float cInvSpatialSigma;
__constant__ float cInvColorSigma;
template <typename scalar_t, int C, int D>
__global__ void FeatureCreation(const scalar_t* inputTensor, scalar_t* outputData, scalar_t* outputFeatures) {
int elementIndex = blockIdx.x * blockDim.x + threadIdx.x;
int batchIndex = blockIdx.y;
int dataBatchOffset = batchIndex * cBatchStride;
int featureBatchOffset = batchIndex * (D + C) * cChannelStride;
#pragma unroll
for (int i = 0; i < C; i++) {
outputData[dataBatchOffset + elementIndex * C + i] =
inputTensor[dataBatchOffset + elementIndex + i * cChannelStride];
outputFeatures[featureBatchOffset + elementIndex * (C + D) + i] =
inputTensor[dataBatchOffset + elementIndex + i * cChannelStride] * cInvColorSigma;
}
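  // decode the flat element index into D spatial coordinates via the per-dimension strides and append them (scaled by the inverse spatial sigma) to the feature vector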
int remainder = elementIndex;
#pragma unroll
for (int i = 0; i < D; i++) {
int coord = remainder / cSpatialStrides[i];
remainder -= coord * cSpatialStrides[i];
outputFeatures[featureBatchOffset + elementIndex * (C + D) + C + i] = coord * cInvSpatialSigma;
}
}
template <typename scalar_t, int C>
__global__ void WriteOutput(const scalar_t* data, scalar_t* outputTensor) {
int elementIndex = blockIdx.x * blockDim.x + threadIdx.x;
int batchIndex = blockIdx.y;
int batchOffset = batchIndex * cBatchStride;
#pragma unroll
for (int i = 0; i < C; i++) {
outputTensor[batchOffset + elementIndex + i * cChannelStride] = data[batchOffset + elementIndex * C + i];
}
}
template <typename scalar_t, int C, int D>
void BilateralFilterPHLCuda(
torch::Tensor inputTensor,
torch::Tensor outputTensor,
float spatialSigma,
float colorSigma) {
// Getting tensor description.
TensorDescription desc = TensorDescription(inputTensor);
int featureChannelCount = desc.channelCount + desc.dimensions;
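  // every element's feature vector holds its C color channels (scaled by the inverse color sigma) plus its D spatial coordinates (scaled by the inverse spatial sigma)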
// Pre calculating inverse sigmas.
float invSpatialSigma = 1.0f / spatialSigma;
float invColorSigma = 1.0f / colorSigma;
// Preparing global memory
scalar_t* inputTensorData = inputTensor.data_ptr<scalar_t>();
scalar_t* outputTensorData = outputTensor.data_ptr<scalar_t>();
scalar_t* data;
scalar_t* features;
hipMalloc(&data, desc.batchCount * desc.channelStride * desc.channelCount * sizeof(scalar_t));
hipMalloc(&features, desc.batchCount * desc.channelStride * featureChannelCount * sizeof(scalar_t));
  // Preparing constant memory
hipMemcpyToSymbol(cBatchStride, &desc.batchStride, sizeof(int));
hipMemcpyToSymbol(cChannelStride, &desc.channelStride, sizeof(int));
hipMemcpyToSymbol(cSpatialStrides, desc.strides, sizeof(int) * desc.dimensions);
hipMemcpyToSymbol(cInvSpatialSigma, &invSpatialSigma, sizeof(float));
hipMemcpyToSymbol(cInvColorSigma, &invColorSigma, sizeof(float));
// Creating features
hipLaunchKernelGGL(( FeatureCreation<scalar_t, C, D>)
, dim3(dim3(desc.channelStride, desc.batchCount)), dim3(dim3(1, 1)), 0, 0, inputTensorData, data, features);
// Filtering data with respect to the features for each sample in batch
for (int batchIndex = 0; batchIndex < desc.batchCount; batchIndex++) {
scalar_t* offsetData = data + batchIndex * desc.batchStride;
scalar_t* offsetFeatures = features + batchIndex * featureChannelCount * desc.channelStride;
PermutohedralCuda<scalar_t, C, C + D>(offsetData, offsetFeatures, desc.channelStride, true);
}
// Writing output
hipLaunchKernelGGL(( WriteOutput<scalar_t, C>), dim3(dim3(desc.channelStride, desc.batchCount)), dim3(dim3(1, 1)), 0, 0, data, outputTensorData);
hipFree(data);
hipFree(features);
}
// Function to choose template implementation based on dynamic, channels and dimensions
torch::Tensor BilateralFilterPHLCuda(torch::Tensor inputTensor, float spatialSigma, float colorSigma) {
torch::Tensor outputTensor = torch::zeros_like(inputTensor);
#define CASE(c, d) \
AT_DISPATCH_FLOATING_TYPES(inputTensor.type(), "BilateralFilterCudaPHL", ([&] { \
BilateralFilterPHLCuda<scalar_t, c, d>( \
inputTensor, outputTensor, spatialSigma, colorSigma); \
}));
SWITCH_AB(CASE, 16, 3, inputTensor.size(1), inputTensor.dim() - 2);
return outputTensor;
}
| 602495d6b7874f5bf3600aadf4dccaed5ff385b3.cu | /*
Copyright 2020 - 2021 MONAI Consortium
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
#include "filtering/permutohedral/permutohedral.h"
#include "utils/meta_macros.h"
#include "utils/tensor_description.h"
__constant__ int cBatchStride;
__constant__ int cChannelStride;
__constant__ int cSpatialStrides[3];
__constant__ float cInvSpatialSigma;
__constant__ float cInvColorSigma;
template <typename scalar_t, int C, int D>
__global__ void FeatureCreation(const scalar_t* inputTensor, scalar_t* outputData, scalar_t* outputFeatures) {
int elementIndex = blockIdx.x * blockDim.x + threadIdx.x;
int batchIndex = blockIdx.y;
int dataBatchOffset = batchIndex * cBatchStride;
int featureBatchOffset = batchIndex * (D + C) * cChannelStride;
#pragma unroll
for (int i = 0; i < C; i++) {
outputData[dataBatchOffset + elementIndex * C + i] =
inputTensor[dataBatchOffset + elementIndex + i * cChannelStride];
outputFeatures[featureBatchOffset + elementIndex * (C + D) + i] =
inputTensor[dataBatchOffset + elementIndex + i * cChannelStride] * cInvColorSigma;
}
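  // decode the flat element index into D spatial coordinates via the per-dimension strides and append them (scaled by the inverse spatial sigma) to the feature vector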
int remainder = elementIndex;
#pragma unroll
for (int i = 0; i < D; i++) {
int coord = remainder / cSpatialStrides[i];
remainder -= coord * cSpatialStrides[i];
outputFeatures[featureBatchOffset + elementIndex * (C + D) + C + i] = coord * cInvSpatialSigma;
}
}
template <typename scalar_t, int C>
__global__ void WriteOutput(const scalar_t* data, scalar_t* outputTensor) {
int elementIndex = blockIdx.x * blockDim.x + threadIdx.x;
int batchIndex = blockIdx.y;
int batchOffset = batchIndex * cBatchStride;
#pragma unroll
for (int i = 0; i < C; i++) {
outputTensor[batchOffset + elementIndex + i * cChannelStride] = data[batchOffset + elementIndex * C + i];
}
}
template <typename scalar_t, int C, int D>
void BilateralFilterPHLCuda(
torch::Tensor inputTensor,
torch::Tensor outputTensor,
float spatialSigma,
float colorSigma) {
// Getting tensor description.
TensorDescription desc = TensorDescription(inputTensor);
int featureChannelCount = desc.channelCount + desc.dimensions;
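  // every element's feature vector holds its C color channels (scaled by the inverse color sigma) plus its D spatial coordinates (scaled by the inverse spatial sigma)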
// Pre calculating inverse sigmas.
float invSpatialSigma = 1.0f / spatialSigma;
float invColorSigma = 1.0f / colorSigma;
// Preparing global memory
scalar_t* inputTensorData = inputTensor.data_ptr<scalar_t>();
scalar_t* outputTensorData = outputTensor.data_ptr<scalar_t>();
scalar_t* data;
scalar_t* features;
cudaMalloc(&data, desc.batchCount * desc.channelStride * desc.channelCount * sizeof(scalar_t));
cudaMalloc(&features, desc.batchCount * desc.channelStride * featureChannelCount * sizeof(scalar_t));
  // Preparing constant memory
cudaMemcpyToSymbol(cBatchStride, &desc.batchStride, sizeof(int));
cudaMemcpyToSymbol(cChannelStride, &desc.channelStride, sizeof(int));
cudaMemcpyToSymbol(cSpatialStrides, desc.strides, sizeof(int) * desc.dimensions);
cudaMemcpyToSymbol(cInvSpatialSigma, &invSpatialSigma, sizeof(float));
cudaMemcpyToSymbol(cInvColorSigma, &invColorSigma, sizeof(float));
// Creating features
FeatureCreation<scalar_t, C, D>
<<<dim3(desc.channelStride, desc.batchCount), dim3(1, 1)>>>(inputTensorData, data, features);
// Filtering data with respect to the features for each sample in batch
for (int batchIndex = 0; batchIndex < desc.batchCount; batchIndex++) {
scalar_t* offsetData = data + batchIndex * desc.batchStride;
scalar_t* offsetFeatures = features + batchIndex * featureChannelCount * desc.channelStride;
PermutohedralCuda<scalar_t, C, C + D>(offsetData, offsetFeatures, desc.channelStride, true);
}
// Writing output
WriteOutput<scalar_t, C><<<dim3(desc.channelStride, desc.batchCount), dim3(1, 1)>>>(data, outputTensorData);
cudaFree(data);
cudaFree(features);
}
// Function to choose template implementation based on dynamic, channels and dimensions
torch::Tensor BilateralFilterPHLCuda(torch::Tensor inputTensor, float spatialSigma, float colorSigma) {
torch::Tensor outputTensor = torch::zeros_like(inputTensor);
#define CASE(c, d) \
AT_DISPATCH_FLOATING_TYPES(inputTensor.type(), "BilateralFilterCudaPHL", ([&] { \
BilateralFilterPHLCuda<scalar_t, c, d>( \
inputTensor, outputTensor, spatialSigma, colorSigma); \
}));
SWITCH_AB(CASE, 16, 3, inputTensor.size(1), inputTensor.dim() - 2);
return outputTensor;
}
|
79cf6cbd437ee9318c59f7e972e3e7e986cb96c0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void elementwise_add(const int * array1,
const int * array2, int * result, int size) {
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int stride = gridDim.x * blockDim.x;
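    // grid-stride loop: each thread processes elements idx, idx + stride, idx + 2*stride, ...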
while (idx < size) {
result[idx] = array1[idx] + array2[idx];
idx += stride;
}
}
| 79cf6cbd437ee9318c59f7e972e3e7e986cb96c0.cu | __global__ void elementwise_add(const int * array1,
const int * array2, int * result, int size) {
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int stride = gridDim.x * blockDim.x;
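    // grid-stride loop: each thread processes elements idx, idx + stride, idx + 2*stride, ...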
while (idx < size) {
result[idx] = array1[idx] + array2[idx];
idx += stride;
}
}
|
de3fc28d18123a2949f70d7e2d1cf8bb92212fb6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CAHitNtupletGeneratorKernelsImpl.h"
template <>
void CAHitNtupletGeneratorKernelsGPU::fillHitDetIndices(HitsView const *hv, TkSoA *tracks_d, hipStream_t cudaStream) {
auto blockSize = 128;
auto numberOfBlocks = (HitContainer::capacity() + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( kernel_fillHitDetIndices), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream,
&tracks_d->hitIndices, hv, &tracks_d->detIndices);
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
}
template <>
void CAHitNtupletGeneratorKernelsGPU::launchKernels(HitsOnCPU const &hh, TkSoA *tracks_d, hipStream_t cudaStream) {
// these are pointer on GPU!
auto *tuples_d = &tracks_d->hitIndices;
auto *quality_d = (Quality *)(&tracks_d->m_quality);
// zero tuples
cms::cuda::launchZero(tuples_d, cudaStream);
auto nhits = hh.nHits();
assert(nhits <= pixelGPUConstants::maxNumberOfHits);
// std::cout << "N hits " << nhits << std::endl;
// if (nhits<2) std::cout << "too few hits " << nhits << std::endl;
//
  // applying combinatoric cleaning such as fishbone at this stage is too expensive
//
auto nthTot = 64;
auto stride = 4;
auto blockSize = nthTot / stride;
auto numberOfBlocks = (3 * m_params.maxNumberOfDoublets_ / 4 + blockSize - 1) / blockSize;
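  // a single grid dimension is limited to 65535 blocks; if the count would exceed that, grow the block size so the launch fits (checked by the assert below)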
auto rescale = numberOfBlocks / 65536;
blockSize *= (rescale + 1);
numberOfBlocks = (3 * m_params.maxNumberOfDoublets_ / 4 + blockSize - 1) / blockSize;
assert(numberOfBlocks < 65536);
assert(blockSize > 0 && 0 == blockSize % 16);
dim3 blks(1, numberOfBlocks, 1);
dim3 thrs(stride, blockSize, 1);
hipLaunchKernelGGL(( kernel_connect), dim3(blks), dim3(thrs), 0, cudaStream,
device_hitTuple_apc_,
device_hitToTuple_apc_, // needed only to be reset, ready for next kernel
hh.view(),
device_theCells_.get(),
device_nCells_,
device_theCellNeighbors_.get(),
device_isOuterHitOfCell_.get(),
m_params.hardCurvCut_,
m_params.ptmin_,
m_params.CAThetaCutBarrel_,
m_params.CAThetaCutForward_,
m_params.dcaCutInnerTriplet_,
m_params.dcaCutOuterTriplet_);
cudaCheck(hipGetLastError());
if (nhits > 1 && m_params.earlyFishbone_) {
auto nthTot = 128;
auto stride = 16;
auto blockSize = nthTot / stride;
auto numberOfBlocks = (nhits + blockSize - 1) / blockSize;
dim3 blks(1, numberOfBlocks, 1);
dim3 thrs(stride, blockSize, 1);
hipLaunchKernelGGL(( gpuPixelDoublets::fishbone), dim3(blks), dim3(thrs), 0, cudaStream,
hh.view(), device_theCells_.get(), device_nCells_, device_isOuterHitOfCell_.get(), nhits, false);
cudaCheck(hipGetLastError());
}
blockSize = 64;
numberOfBlocks = (3 * m_params.maxNumberOfDoublets_ / 4 + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( kernel_find_ntuplets), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, hh.view(),
device_theCells_.get(),
device_nCells_,
device_theCellTracks_.get(),
tuples_d,
device_hitTuple_apc_,
quality_d,
m_params.minHitsPerNtuplet_);
cudaCheck(hipGetLastError());
if (m_params.doStats_)
hipLaunchKernelGGL(( kernel_mark_used), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, hh.view(), device_theCells_.get(), device_nCells_);
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
blockSize = 128;
numberOfBlocks = (HitContainer::totbins() + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( cms::cuda::finalizeBulk), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, device_hitTuple_apc_, tuples_d);
// remove duplicates (tracks that share a doublet)
numberOfBlocks = (3 * m_params.maxNumberOfDoublets_ / 4 + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( kernel_earlyDuplicateRemover), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream,
device_theCells_.get(), device_nCells_, tuples_d, quality_d);
cudaCheck(hipGetLastError());
blockSize = 128;
numberOfBlocks = (3 * CAConstants::maxTuples() / 4 + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( kernel_countMultiplicity), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream,
tuples_d, quality_d, device_tupleMultiplicity_.get());
cms::cuda::launchFinalize(device_tupleMultiplicity_.get(), cudaStream);
hipLaunchKernelGGL(( kernel_fillMultiplicity), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream,
tuples_d, quality_d, device_tupleMultiplicity_.get());
cudaCheck(hipGetLastError());
if (nhits > 1 && m_params.lateFishbone_) {
auto nthTot = 128;
auto stride = 16;
auto blockSize = nthTot / stride;
auto numberOfBlocks = (nhits + blockSize - 1) / blockSize;
dim3 blks(1, numberOfBlocks, 1);
dim3 thrs(stride, blockSize, 1);
hipLaunchKernelGGL(( gpuPixelDoublets::fishbone), dim3(blks), dim3(thrs), 0, cudaStream,
hh.view(), device_theCells_.get(), device_nCells_, device_isOuterHitOfCell_.get(), nhits, true);
cudaCheck(hipGetLastError());
}
if (m_params.doStats_) {
numberOfBlocks = (::max(nhits, m_params.maxNumberOfDoublets_) + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( kernel_checkOverflows), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, tuples_d,
device_tupleMultiplicity_.get(),
device_hitTuple_apc_,
device_theCells_.get(),
device_nCells_,
device_theCellNeighbors_.get(),
device_theCellTracks_.get(),
device_isOuterHitOfCell_.get(),
nhits,
m_params.maxNumberOfDoublets_,
counters_);
cudaCheck(hipGetLastError());
}
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
// free space asap
// device_isOuterHitOfCell_.reset();
}
template <>
void CAHitNtupletGeneratorKernelsGPU::buildDoublets(HitsOnCPU const &hh, hipStream_t stream) {
auto nhits = hh.nHits();
#ifdef NTUPLE_DEBUG
std::cout << "building Doublets out of " << nhits << " Hits" << std::endl;
#endif
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
// in principle we can use "nhits" to heuristically dimension the workspace...
device_isOuterHitOfCell_ = Traits::template make_unique<GPUCACell::OuterHitOfCell[]>(::max(1U, nhits), stream);
assert(device_isOuterHitOfCell_.get());
cellStorage_ = Traits::template make_unique<unsigned char[]>(
CAConstants::maxNumOfActiveDoublets() * sizeof(GPUCACell::CellNeighbors) +
CAConstants::maxNumOfActiveDoublets() * sizeof(GPUCACell::CellTracks),
stream);
device_theCellNeighborsContainer_ = (GPUCACell::CellNeighbors *)cellStorage_.get();
device_theCellTracksContainer_ =
(GPUCACell::CellTracks *)(cellStorage_.get() +
CAConstants::maxNumOfActiveDoublets() * sizeof(GPUCACell::CellNeighbors));
{
int threadsPerBlock = 128;
// at least one block!
int blocks = (::max(1U, nhits) + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( gpuPixelDoublets::initDoublets), dim3(blocks), dim3(threadsPerBlock), 0, stream, device_isOuterHitOfCell_.get(),
nhits,
device_theCellNeighbors_.get(),
device_theCellNeighborsContainer_,
device_theCellTracks_.get(),
device_theCellTracksContainer_);
cudaCheck(hipGetLastError());
}
device_theCells_ = Traits::template make_unique<GPUCACell[]>(m_params.maxNumberOfDoublets_, stream);
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
if (0 == nhits)
return; // protect against empty events
// FIXME avoid magic numbers
auto nActualPairs = gpuPixelDoublets::nPairs;
if (!m_params.includeJumpingForwardDoublets_)
nActualPairs = 15;
if (m_params.minHitsPerNtuplet_ > 3) {
nActualPairs = 13;
}
assert(nActualPairs <= gpuPixelDoublets::nPairs);
int stride = 4;
int threadsPerBlock = gpuPixelDoublets::getDoubletsFromHistoMaxBlockSize / stride;
int blocks = (4 * nhits + threadsPerBlock - 1) / threadsPerBlock;
dim3 blks(1, blocks, 1);
dim3 thrs(stride, threadsPerBlock, 1);
hipLaunchKernelGGL(( gpuPixelDoublets::getDoubletsFromHisto), dim3(blks), dim3(thrs), 0, stream, device_theCells_.get(),
device_nCells_,
device_theCellNeighbors_.get(),
device_theCellTracks_.get(),
hh.view(),
device_isOuterHitOfCell_.get(),
nActualPairs,
m_params.idealConditions_,
m_params.doClusterCut_,
m_params.doZ0Cut_,
m_params.doPtCut_,
m_params.maxNumberOfDoublets_);
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
}
template <>
void CAHitNtupletGeneratorKernelsGPU::classifyTuples(HitsOnCPU const &hh, TkSoA *tracks_d, hipStream_t cudaStream) {
// these are pointer on GPU!
auto const *tuples_d = &tracks_d->hitIndices;
auto *quality_d = (Quality *)(&tracks_d->m_quality);
auto blockSize = 64;
// classify tracks based on kinematics
auto numberOfBlocks = (3 * CAConstants::maxNumberOfQuadruplets() / 4 + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( kernel_classifyTracks), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, tuples_d, tracks_d, m_params.cuts_, quality_d);
cudaCheck(hipGetLastError());
if (m_params.lateFishbone_) {
// apply fishbone cleaning to good tracks
numberOfBlocks = (3 * m_params.maxNumberOfDoublets_ / 4 + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( kernel_fishboneCleaner), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream,
device_theCells_.get(), device_nCells_, quality_d);
cudaCheck(hipGetLastError());
}
// remove duplicates (tracks that share a doublet)
numberOfBlocks = (3 * m_params.maxNumberOfDoublets_ / 4 + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( kernel_fastDuplicateRemover), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream,
device_theCells_.get(), device_nCells_, tuples_d, tracks_d);
cudaCheck(hipGetLastError());
if (m_params.minHitsPerNtuplet_ < 4 || m_params.doStats_) {
// fill hit->track "map"
numberOfBlocks = (3 * CAConstants::maxNumberOfQuadruplets() / 4 + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( kernel_countHitInTracks), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream,
tuples_d, quality_d, device_hitToTuple_.get());
cudaCheck(hipGetLastError());
cms::cuda::launchFinalize(device_hitToTuple_.get(), cudaStream);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( kernel_fillHitInTracks), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, tuples_d, quality_d, device_hitToTuple_.get());
cudaCheck(hipGetLastError());
}
if (m_params.minHitsPerNtuplet_ < 4) {
// remove duplicates (tracks that share a hit)
numberOfBlocks = (HitToTuple::capacity() + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( kernel_tripletCleaner), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream,
hh.view(), tuples_d, tracks_d, quality_d, device_hitToTuple_.get());
cudaCheck(hipGetLastError());
}
if (m_params.doStats_) {
// counters (add flag???)
numberOfBlocks = (HitToTuple::capacity() + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( kernel_doStatsForHitInTracks), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, device_hitToTuple_.get(), counters_);
cudaCheck(hipGetLastError());
numberOfBlocks = (3 * CAConstants::maxNumberOfQuadruplets() / 4 + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( kernel_doStatsForTracks), dim3(numberOfBlocks), dim3(blockSize), 0, cudaStream, tuples_d, quality_d, counters_);
cudaCheck(hipGetLastError());
}
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
#ifdef DUMP_GPU_TK_TUPLES
static std::atomic<int> iev(0);
++iev;
hipLaunchKernelGGL(( kernel_print_found_ntuplets), dim3(1), dim3(32), 0, cudaStream,
hh.view(), tuples_d, tracks_d, quality_d, device_hitToTuple_.get(), 100, iev);
#endif
}
template <>
void CAHitNtupletGeneratorKernelsGPU::printCounters(Counters const *counters) {
hipLaunchKernelGGL(( kernel_printCounters), dim3(1), dim3(1), 0, 0, counters);
}
| de3fc28d18123a2949f70d7e2d1cf8bb92212fb6.cu | #include "CAHitNtupletGeneratorKernelsImpl.h"
template <>
void CAHitNtupletGeneratorKernelsGPU::fillHitDetIndices(HitsView const *hv, TkSoA *tracks_d, cudaStream_t cudaStream) {
auto blockSize = 128;
auto numberOfBlocks = (HitContainer::capacity() + blockSize - 1) / blockSize;
kernel_fillHitDetIndices<<<numberOfBlocks, blockSize, 0, cudaStream>>>(
&tracks_d->hitIndices, hv, &tracks_d->detIndices);
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
}
template <>
void CAHitNtupletGeneratorKernelsGPU::launchKernels(HitsOnCPU const &hh, TkSoA *tracks_d, cudaStream_t cudaStream) {
// these are pointer on GPU!
auto *tuples_d = &tracks_d->hitIndices;
auto *quality_d = (Quality *)(&tracks_d->m_quality);
// zero tuples
cms::cuda::launchZero(tuples_d, cudaStream);
auto nhits = hh.nHits();
assert(nhits <= pixelGPUConstants::maxNumberOfHits);
// std::cout << "N hits " << nhits << std::endl;
// if (nhits<2) std::cout << "too few hits " << nhits << std::endl;
//
  // applying combinatoric cleaning such as fishbone at this stage is too expensive
//
auto nthTot = 64;
auto stride = 4;
auto blockSize = nthTot / stride;
auto numberOfBlocks = (3 * m_params.maxNumberOfDoublets_ / 4 + blockSize - 1) / blockSize;
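  // a single grid dimension is limited to 65535 blocks; if the count would exceed that, grow the block size so the launch fits (checked by the assert below)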
auto rescale = numberOfBlocks / 65536;
blockSize *= (rescale + 1);
numberOfBlocks = (3 * m_params.maxNumberOfDoublets_ / 4 + blockSize - 1) / blockSize;
assert(numberOfBlocks < 65536);
assert(blockSize > 0 && 0 == blockSize % 16);
dim3 blks(1, numberOfBlocks, 1);
dim3 thrs(stride, blockSize, 1);
kernel_connect<<<blks, thrs, 0, cudaStream>>>(
device_hitTuple_apc_,
device_hitToTuple_apc_, // needed only to be reset, ready for next kernel
hh.view(),
device_theCells_.get(),
device_nCells_,
device_theCellNeighbors_.get(),
device_isOuterHitOfCell_.get(),
m_params.hardCurvCut_,
m_params.ptmin_,
m_params.CAThetaCutBarrel_,
m_params.CAThetaCutForward_,
m_params.dcaCutInnerTriplet_,
m_params.dcaCutOuterTriplet_);
cudaCheck(cudaGetLastError());
if (nhits > 1 && m_params.earlyFishbone_) {
auto nthTot = 128;
auto stride = 16;
auto blockSize = nthTot / stride;
auto numberOfBlocks = (nhits + blockSize - 1) / blockSize;
dim3 blks(1, numberOfBlocks, 1);
dim3 thrs(stride, blockSize, 1);
gpuPixelDoublets::fishbone<<<blks, thrs, 0, cudaStream>>>(
hh.view(), device_theCells_.get(), device_nCells_, device_isOuterHitOfCell_.get(), nhits, false);
cudaCheck(cudaGetLastError());
}
blockSize = 64;
numberOfBlocks = (3 * m_params.maxNumberOfDoublets_ / 4 + blockSize - 1) / blockSize;
kernel_find_ntuplets<<<numberOfBlocks, blockSize, 0, cudaStream>>>(hh.view(),
device_theCells_.get(),
device_nCells_,
device_theCellTracks_.get(),
tuples_d,
device_hitTuple_apc_,
quality_d,
m_params.minHitsPerNtuplet_);
cudaCheck(cudaGetLastError());
if (m_params.doStats_)
kernel_mark_used<<<numberOfBlocks, blockSize, 0, cudaStream>>>(hh.view(), device_theCells_.get(), device_nCells_);
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
blockSize = 128;
numberOfBlocks = (HitContainer::totbins() + blockSize - 1) / blockSize;
cms::cuda::finalizeBulk<<<numberOfBlocks, blockSize, 0, cudaStream>>>(device_hitTuple_apc_, tuples_d);
// remove duplicates (tracks that share a doublet)
numberOfBlocks = (3 * m_params.maxNumberOfDoublets_ / 4 + blockSize - 1) / blockSize;
kernel_earlyDuplicateRemover<<<numberOfBlocks, blockSize, 0, cudaStream>>>(
device_theCells_.get(), device_nCells_, tuples_d, quality_d);
cudaCheck(cudaGetLastError());
blockSize = 128;
numberOfBlocks = (3 * CAConstants::maxTuples() / 4 + blockSize - 1) / blockSize;
kernel_countMultiplicity<<<numberOfBlocks, blockSize, 0, cudaStream>>>(
tuples_d, quality_d, device_tupleMultiplicity_.get());
cms::cuda::launchFinalize(device_tupleMultiplicity_.get(), cudaStream);
kernel_fillMultiplicity<<<numberOfBlocks, blockSize, 0, cudaStream>>>(
tuples_d, quality_d, device_tupleMultiplicity_.get());
cudaCheck(cudaGetLastError());
if (nhits > 1 && m_params.lateFishbone_) {
auto nthTot = 128;
auto stride = 16;
auto blockSize = nthTot / stride;
auto numberOfBlocks = (nhits + blockSize - 1) / blockSize;
dim3 blks(1, numberOfBlocks, 1);
dim3 thrs(stride, blockSize, 1);
gpuPixelDoublets::fishbone<<<blks, thrs, 0, cudaStream>>>(
hh.view(), device_theCells_.get(), device_nCells_, device_isOuterHitOfCell_.get(), nhits, true);
cudaCheck(cudaGetLastError());
}
if (m_params.doStats_) {
numberOfBlocks = (std::max(nhits, m_params.maxNumberOfDoublets_) + blockSize - 1) / blockSize;
kernel_checkOverflows<<<numberOfBlocks, blockSize, 0, cudaStream>>>(tuples_d,
device_tupleMultiplicity_.get(),
device_hitTuple_apc_,
device_theCells_.get(),
device_nCells_,
device_theCellNeighbors_.get(),
device_theCellTracks_.get(),
device_isOuterHitOfCell_.get(),
nhits,
m_params.maxNumberOfDoublets_,
counters_);
cudaCheck(cudaGetLastError());
}
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
// free space asap
// device_isOuterHitOfCell_.reset();
}
template <>
void CAHitNtupletGeneratorKernelsGPU::buildDoublets(HitsOnCPU const &hh, cudaStream_t stream) {
auto nhits = hh.nHits();
#ifdef NTUPLE_DEBUG
std::cout << "building Doublets out of " << nhits << " Hits" << std::endl;
#endif
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
// in principle we can use "nhits" to heuristically dimension the workspace...
device_isOuterHitOfCell_ = Traits::template make_unique<GPUCACell::OuterHitOfCell[]>(std::max(1U, nhits), stream);
assert(device_isOuterHitOfCell_.get());
cellStorage_ = Traits::template make_unique<unsigned char[]>(
CAConstants::maxNumOfActiveDoublets() * sizeof(GPUCACell::CellNeighbors) +
CAConstants::maxNumOfActiveDoublets() * sizeof(GPUCACell::CellTracks),
stream);
device_theCellNeighborsContainer_ = (GPUCACell::CellNeighbors *)cellStorage_.get();
device_theCellTracksContainer_ =
(GPUCACell::CellTracks *)(cellStorage_.get() +
CAConstants::maxNumOfActiveDoublets() * sizeof(GPUCACell::CellNeighbors));
{
int threadsPerBlock = 128;
// at least one block!
int blocks = (std::max(1U, nhits) + threadsPerBlock - 1) / threadsPerBlock;
gpuPixelDoublets::initDoublets<<<blocks, threadsPerBlock, 0, stream>>>(device_isOuterHitOfCell_.get(),
nhits,
device_theCellNeighbors_.get(),
device_theCellNeighborsContainer_,
device_theCellTracks_.get(),
device_theCellTracksContainer_);
cudaCheck(cudaGetLastError());
}
device_theCells_ = Traits::template make_unique<GPUCACell[]>(m_params.maxNumberOfDoublets_, stream);
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
if (0 == nhits)
return; // protect against empty events
// FIXME avoid magic numbers
auto nActualPairs = gpuPixelDoublets::nPairs;
if (!m_params.includeJumpingForwardDoublets_)
nActualPairs = 15;
if (m_params.minHitsPerNtuplet_ > 3) {
nActualPairs = 13;
}
assert(nActualPairs <= gpuPixelDoublets::nPairs);
int stride = 4;
int threadsPerBlock = gpuPixelDoublets::getDoubletsFromHistoMaxBlockSize / stride;
int blocks = (4 * nhits + threadsPerBlock - 1) / threadsPerBlock;
dim3 blks(1, blocks, 1);
dim3 thrs(stride, threadsPerBlock, 1);
gpuPixelDoublets::getDoubletsFromHisto<<<blks, thrs, 0, stream>>>(device_theCells_.get(),
device_nCells_,
device_theCellNeighbors_.get(),
device_theCellTracks_.get(),
hh.view(),
device_isOuterHitOfCell_.get(),
nActualPairs,
m_params.idealConditions_,
m_params.doClusterCut_,
m_params.doZ0Cut_,
m_params.doPtCut_,
m_params.maxNumberOfDoublets_);
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
}
template <>
void CAHitNtupletGeneratorKernelsGPU::classifyTuples(HitsOnCPU const &hh, TkSoA *tracks_d, cudaStream_t cudaStream) {
// these are pointer on GPU!
auto const *tuples_d = &tracks_d->hitIndices;
auto *quality_d = (Quality *)(&tracks_d->m_quality);
auto blockSize = 64;
// classify tracks based on kinematics
auto numberOfBlocks = (3 * CAConstants::maxNumberOfQuadruplets() / 4 + blockSize - 1) / blockSize;
kernel_classifyTracks<<<numberOfBlocks, blockSize, 0, cudaStream>>>(tuples_d, tracks_d, m_params.cuts_, quality_d);
cudaCheck(cudaGetLastError());
if (m_params.lateFishbone_) {
// apply fishbone cleaning to good tracks
numberOfBlocks = (3 * m_params.maxNumberOfDoublets_ / 4 + blockSize - 1) / blockSize;
kernel_fishboneCleaner<<<numberOfBlocks, blockSize, 0, cudaStream>>>(
device_theCells_.get(), device_nCells_, quality_d);
cudaCheck(cudaGetLastError());
}
// remove duplicates (tracks that share a doublet)
numberOfBlocks = (3 * m_params.maxNumberOfDoublets_ / 4 + blockSize - 1) / blockSize;
kernel_fastDuplicateRemover<<<numberOfBlocks, blockSize, 0, cudaStream>>>(
device_theCells_.get(), device_nCells_, tuples_d, tracks_d);
cudaCheck(cudaGetLastError());
if (m_params.minHitsPerNtuplet_ < 4 || m_params.doStats_) {
// fill hit->track "map"
numberOfBlocks = (3 * CAConstants::maxNumberOfQuadruplets() / 4 + blockSize - 1) / blockSize;
kernel_countHitInTracks<<<numberOfBlocks, blockSize, 0, cudaStream>>>(
tuples_d, quality_d, device_hitToTuple_.get());
cudaCheck(cudaGetLastError());
cms::cuda::launchFinalize(device_hitToTuple_.get(), cudaStream);
cudaCheck(cudaGetLastError());
kernel_fillHitInTracks<<<numberOfBlocks, blockSize, 0, cudaStream>>>(tuples_d, quality_d, device_hitToTuple_.get());
cudaCheck(cudaGetLastError());
}
if (m_params.minHitsPerNtuplet_ < 4) {
// remove duplicates (tracks that share a hit)
numberOfBlocks = (HitToTuple::capacity() + blockSize - 1) / blockSize;
kernel_tripletCleaner<<<numberOfBlocks, blockSize, 0, cudaStream>>>(
hh.view(), tuples_d, tracks_d, quality_d, device_hitToTuple_.get());
cudaCheck(cudaGetLastError());
}
if (m_params.doStats_) {
// counters (add flag???)
numberOfBlocks = (HitToTuple::capacity() + blockSize - 1) / blockSize;
kernel_doStatsForHitInTracks<<<numberOfBlocks, blockSize, 0, cudaStream>>>(device_hitToTuple_.get(), counters_);
cudaCheck(cudaGetLastError());
numberOfBlocks = (3 * CAConstants::maxNumberOfQuadruplets() / 4 + blockSize - 1) / blockSize;
kernel_doStatsForTracks<<<numberOfBlocks, blockSize, 0, cudaStream>>>(tuples_d, quality_d, counters_);
cudaCheck(cudaGetLastError());
}
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
#ifdef DUMP_GPU_TK_TUPLES
static std::atomic<int> iev(0);
++iev;
kernel_print_found_ntuplets<<<1, 32, 0, cudaStream>>>(
hh.view(), tuples_d, tracks_d, quality_d, device_hitToTuple_.get(), 100, iev);
#endif
}
template <>
void CAHitNtupletGeneratorKernelsGPU::printCounters(Counters const *counters) {
kernel_printCounters<<<1, 1>>>(counters);
}
|
c081b328f0721c7a71a92e44533d2bdda6c77eb7.hip | // !!! This is a file automatically generated by hipify!!!
/**
-- (C) Copyright 2013 King Abdullah University of Science and Technology
Authors:
Ahmad Abdelfattah ([email protected])
David Keyes ([email protected])
Hatem Ltaief ([email protected])
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the King Abdullah University of Science and
Technology nor the names of its contributors may be used to endorse
or promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <rocblas.h>
#include <stdio.h>
#include "mgpu_control.h"
/*****************************************************************************************/
extern "C"
void kblas_smalloc_mgpu_1D( int rows, int cols, float** dA, int ngpus, int ldb, int block_size)
{
kblas_malloc_mgpu_1D<float>(rows, cols, dA, ngpus, ldb, block_size);
}
extern "C"
void kblas_dmalloc_mgpu_1D( int rows, int cols, double** dA, int ngpus, int ldb, int block_size)
{
kblas_malloc_mgpu_1D<double>(rows, cols, dA, ngpus, ldb, block_size);
}
extern "C"
void kblas_cmalloc_mgpu_1D( int rows, int cols, cuFloatComplex** dA, int ngpus, int ldb, int block_size)
{
kblas_malloc_mgpu_1D<cuFloatComplex>(rows, cols, dA, ngpus, ldb, block_size);
}
extern "C"
void kblas_zmalloc_mgpu_1D( int rows, int cols, hipDoubleComplex** dA, int ngpus, int ldb, int block_size)
{
kblas_malloc_mgpu_1D<hipDoubleComplex>(rows, cols, dA, ngpus, ldb, block_size);
}
/*****************************************************************************************/
extern "C"
void kblas_ssetmatrix_mgpu_1D(int rows, int cols, float* A, int LDA, float** dA, int LDB, int ngpus, int block_size)
{
kblas_setmatrix_mgpu_1D<float>(rows, cols, A, LDA, dA, LDB, ngpus, block_size);
}
extern "C"
void kblas_dsetmatrix_mgpu_1D(int rows, int cols, double* A, int LDA, double** dA, int LDB, int ngpus, int block_size)
{
kblas_setmatrix_mgpu_1D<double>(rows, cols, A, LDA, dA, LDB, ngpus, block_size);
}
extern "C"
void kblas_csetmatrix_mgpu_1D(int rows, int cols, cuFloatComplex* A, int LDA, cuFloatComplex** dA, int LDB, int ngpus, int block_size)
{
kblas_setmatrix_mgpu_1D<cuFloatComplex>(rows, cols, A, LDA, dA, LDB, ngpus, block_size);
}
extern "C"
void kblas_zsetmatrix_mgpu_1D(int rows, int cols, hipDoubleComplex* A, int LDA, hipDoubleComplex** dA, int LDB, int ngpus, int block_size)
{
kblas_setmatrix_mgpu_1D<hipDoubleComplex>(rows, cols, A, LDA, dA, LDB, ngpus, block_size);
}
/*****************************************************************************************/
extern "C"
void kblas_ssetvector_mgpu_1D(int n, float* Y, float** dY, int ngpus, int block_size)
{
kblas_setvector_mgpu_1D<float>(n, Y, dY, ngpus, block_size);
}
extern "C"
void kblas_dsetvector_mgpu_1D(int n, double* Y, double** dY, int ngpus, int block_size)
{
kblas_setvector_mgpu_1D<double>(n, Y, dY, ngpus, block_size);
}
extern "C"
void kblas_csetvector_mgpu_1D(int n, cuFloatComplex* Y, cuFloatComplex** dY, int ngpus, int block_size)
{
kblas_setvector_mgpu_1D<cuFloatComplex>(n, Y, dY, ngpus, block_size);
}
extern "C"
void kblas_zsetvector_mgpu_1D(int n, hipDoubleComplex* Y, hipDoubleComplex** dY, int ngpus, int block_size)
{
kblas_setvector_mgpu_1D<hipDoubleComplex>(n, Y, dY, ngpus, block_size);
}
| c081b328f0721c7a71a92e44533d2bdda6c77eb7.cu | /**
-- (C) Copyright 2013 King Abdullah University of Science and Technology
Authors:
Ahmad Abdelfattah ([email protected])
David Keyes ([email protected])
Hatem Ltaief ([email protected])
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the King Abdullah University of Science and
Technology nor the names of its contributors may be used to endorse
or promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**/
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cublas.h>
#include <stdio.h>
#include "mgpu_control.h"
/*****************************************************************************************/
extern "C"
void kblas_smalloc_mgpu_1D( int rows, int cols, float** dA, int ngpus, int ldb, int block_size)
{
kblas_malloc_mgpu_1D<float>(rows, cols, dA, ngpus, ldb, block_size);
}
extern "C"
void kblas_dmalloc_mgpu_1D( int rows, int cols, double** dA, int ngpus, int ldb, int block_size)
{
kblas_malloc_mgpu_1D<double>(rows, cols, dA, ngpus, ldb, block_size);
}
extern "C"
void kblas_cmalloc_mgpu_1D( int rows, int cols, cuFloatComplex** dA, int ngpus, int ldb, int block_size)
{
kblas_malloc_mgpu_1D<cuFloatComplex>(rows, cols, dA, ngpus, ldb, block_size);
}
extern "C"
void kblas_zmalloc_mgpu_1D( int rows, int cols, cuDoubleComplex** dA, int ngpus, int ldb, int block_size)
{
kblas_malloc_mgpu_1D<cuDoubleComplex>(rows, cols, dA, ngpus, ldb, block_size);
}
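/* Usage sketch (illustrative only; the exact distribution semantics live in
   mgpu_control.h, and NGPUS, hA and lda are hypothetical host-side names):
     float *dA[NGPUS];
     kblas_smalloc_mgpu_1D(rows, cols, dA, NGPUS, ldb, block_size);             // per-GPU allocation
     kblas_ssetmatrix_mgpu_1D(rows, cols, hA, lda, dA, ldb, NGPUS, block_size);  // scatter host matrix hA
*/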
/*****************************************************************************************/
extern "C"
void kblas_ssetmatrix_mgpu_1D(int rows, int cols, float* A, int LDA, float** dA, int LDB, int ngpus, int block_size)
{
kblas_setmatrix_mgpu_1D<float>(rows, cols, A, LDA, dA, LDB, ngpus, block_size);
}
extern "C"
void kblas_dsetmatrix_mgpu_1D(int rows, int cols, double* A, int LDA, double** dA, int LDB, int ngpus, int block_size)
{
kblas_setmatrix_mgpu_1D<double>(rows, cols, A, LDA, dA, LDB, ngpus, block_size);
}
extern "C"
void kblas_csetmatrix_mgpu_1D(int rows, int cols, cuFloatComplex* A, int LDA, cuFloatComplex** dA, int LDB, int ngpus, int block_size)
{
kblas_setmatrix_mgpu_1D<cuFloatComplex>(rows, cols, A, LDA, dA, LDB, ngpus, block_size);
}
extern "C"
void kblas_zsetmatrix_mgpu_1D(int rows, int cols, cuDoubleComplex* A, int LDA, cuDoubleComplex** dA, int LDB, int ngpus, int block_size)
{
kblas_setmatrix_mgpu_1D<cuDoubleComplex>(rows, cols, A, LDA, dA, LDB, ngpus, block_size);
}
/*****************************************************************************************/
extern "C"
void kblas_ssetvector_mgpu_1D(int n, float* Y, float** dY, int ngpus, int block_size)
{
kblas_setvector_mgpu_1D<float>(n, Y, dY, ngpus, block_size);
}
extern "C"
void kblas_dsetvector_mgpu_1D(int n, double* Y, double** dY, int ngpus, int block_size)
{
kblas_setvector_mgpu_1D<double>(n, Y, dY, ngpus, block_size);
}
extern "C"
void kblas_csetvector_mgpu_1D(int n, cuFloatComplex* Y, cuFloatComplex** dY, int ngpus, int block_size)
{
kblas_setvector_mgpu_1D<cuFloatComplex>(n, Y, dY, ngpus, block_size);
}
extern "C"
void kblas_zsetvector_mgpu_1D(int n, cuDoubleComplex* Y, cuDoubleComplex** dY, int ngpus, int block_size)
{
kblas_setvector_mgpu_1D<cuDoubleComplex>(n, Y, dY, ngpus, block_size);
}
|
476b92d8c7bbb3e277b63df57e35366b25d6df5f.hip | // !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include <cstdlib>
#include <hip/hip_runtime.h>
#include <iostream>
#include <boost/thread.hpp>
#include <CUDA/HPP/CalElapsedTime.hpp>
#include <CUDA/HPP/InitData.hpp>
#include <CUDA/HPP/Transpose.hpp>
typedef float value_type;
int X = ::pow(2, 12);
int Y = ::pow(2, 10);
int x = 16;
int y = 16;
std::size_t NBytes = X * Y * sizeof(int);
dim3 Thread(x, y, 1);
dim3 Block(X / x, Y / y, 1);
int* in;
int* out;
void SpawnSimple() {
hipLaunchKernelGGL(( Transpose<int, 4096, 2048>), dim3(Block), dim3(Thread), 0, 0, in, out);
}
void SpawnShared() {
hipLaunchKernelGGL(( TransposeShared<int, 4096, 2048, 16, 16>), dim3(Block), dim3(Thread), 0, 0, in, out);
}
void DoEvaluation() {
hipMalloc((void**)&in, NBytes);
hipMalloc((void**)&out, NBytes);
hipLaunchKernelGGL(( InitData<int, 4096>), dim3(Block), dim3(Thread), 0, 0, in, out);
std::cout << CalElapsedTime<>::Execution(SpawnSimple) << '\n';
boost::this_thread::sleep_for(boost::chrono::seconds(3));
hipLaunchKernelGGL(( InitData<int, 4096>), dim3(Block), dim3(Thread), 0, 0, in, out);
std::cout << CalElapsedTime<>::Execution(SpawnShared) << '\n';
hipFree(in);
hipFree(out);
}
auto main() -> decltype(0) {
DoEvaluation();
return 0;
}
| 476b92d8c7bbb3e277b63df57e35366b25d6df5f.cu | #include <cmath>
#include <cstdlib>
#include <cuda_runtime.h>
#include <iostream>
#include <boost/thread.hpp>
#include <CUDA/HPP/CalElapsedTime.hpp>
#include <CUDA/HPP/InitData.hpp>
#include <CUDA/HPP/Transpose.hpp>
typedef float value_type;
int X = std::pow(2, 12);
int Y = std::pow(2, 10);
int x = 16;
int y = 16;
std::size_t NBytes = X * Y * sizeof(int);
dim3 Thread(x, y, 1);
dim3 Block(X / x, Y / y, 1);
int* in;
int* out;
void SpawnSimple() {
Transpose<int, 4096, 2048><<<Block, Thread>>>(in, out);
}
void SpawnShared() {
TransposeShared<int, 4096, 2048, 16, 16><<<Block, Thread>>>(in, out);
}
void DoEvaluation() {
cudaMalloc((void**)&in, NBytes);
cudaMalloc((void**)&out, NBytes);
InitData<int, 4096><<<Block, Thread>>>(in, out);
std::cout << CalElapsedTime<>::Execution(SpawnSimple) << '\n';
boost::this_thread::sleep_for(boost::chrono::seconds(3));
InitData<int, 4096><<<Block, Thread>>>(in, out);
std::cout << CalElapsedTime<>::Execution(SpawnShared) << '\n';
cudaFree(in);
cudaFree(out);
}
auto main() -> decltype(0) {
DoEvaluation();
return 0;
}
|
f0bf16d112119873f2e3a344ae09f23a830fe537.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
#include "io.cuh"
#include "fp.cuh"
#include "bp_hip.cuh"
#include "init_hip.cuh"
#include "utils.cuh"
#include "global_hip.cuh"
#include "fp_gpu.cuh"
#include "bp_gpu.cuh"
#include "global_gpu.cuh"
float max_acc;
clock_t t;
int main(int argc,char *argv[])
{
printf("====== aininot260 [email protected] ======\n");
printf(" Processor used : %s\n",argv[1]);
printf(" Learning rate : %.2f\n",alpha);
printf(" Epochs : %d\n",epochs);
printf(" Batch size : %d\n",minibatch);
printf("========================================\n");
printf("\n");
load_data();
init_params();
if(strcmp(argv[1],"CPU")==0)
{
for(int i=1;i<=epochs;i++)
{
t=clock();
correct_cnt=0;
avg_error=0;
for(int j=0;j<TRAIN_NUM;j++)
{
set_input(j,train_image);
input_conv();
conv_pool();
pool_fc1();
fc1_fc2();
set_answer(j,train_label);
check_answer(correct_cnt);
get_error(avg_error);
update_fc2_b();
update_fc2_w();
update_fc1_b();
update_fc1_w();
update_conv_b();
update_conv_w();
if((j+1)%minibatch==0)
assign_grads();
}
printf("Training Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% Epoch : %d \n",floor(((float)(clock()-t))/CLOCKS_PER_SEC),TRAIN_NUM,((float)correct_cnt/TRAIN_NUM)*100,(avg_error/TRAIN_NUM)*100,i);
correct_cnt=0;
avg_error=0;
for(int j=0;j<TEST_NUM;j++)
{
set_input(j,test_image);
input_conv();
conv_pool();
pool_fc1();
fc1_fc2();
set_answer(j,test_label);
check_answer(correct_cnt);
get_error(avg_error);
}
printf("Testing Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% \n",floor(((float)(clock()-t))/CLOCKS_PER_SEC),TEST_NUM,((float)correct_cnt/TEST_NUM)*100,(avg_error/TEST_NUM)*100);
if((float)correct_cnt/TEST_NUM*100>max_acc)
{
max_acc=(float)correct_cnt/TEST_NUM*100;
export_params();
printf("The new model has been exported. Accuracy has reached to %0.5f%%\n\n",max_acc);
}
else
{
alpha=alpha-(alpha/3);
printf("Learning rate has been reduced to %f\n\n",alpha);
}
}
}
else if(strcmp(argv[1],"GPU")==0)
{
initDevice(0);
int n_stream=N_STREAM;
CHECK(hipMemcpyToSymbol(_minibatch,&minibatch,sizeof(int)));
stream=(hipStream_t*)malloc(n_stream*sizeof(hipStream_t));
for(int i=0;i<n_stream;i++)
hipStreamCreateWithFlags(&stream[i],hipStreamNonBlocking);
// hipStreamCreate(&stream[i]);
for(int i=1;i<=epochs;i++)
{
t=clock();
correct_cnt=0;
avg_error=0;
for(int j=0;j<TRAIN_NUM;j++)
{
fp_conv_pool_gpu(j,1);
fp_fc_answer_gpu(j,1);
bp_update_gpu(j);
if((j+1)%minibatch==0)
bp_assign_grads_gpu(j);
}
hipDeviceSynchronize();
printf("Training Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% Epoch : %d \n",floor(((float)(clock()-t))/CLOCKS_PER_SEC),TRAIN_NUM,((float)correct_cnt/TRAIN_NUM)*100,(avg_error/TRAIN_NUM)*100,i);
correct_cnt=0;
avg_error=0;
for(int j=0;j<TEST_NUM;j++)
{
fp_conv_pool_gpu(j,0);
fp_fc_answer_gpu(j,0);
}
hipDeviceSynchronize();
printf("Testing Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% \n",floor(((float)(clock()-t))/CLOCKS_PER_SEC),TEST_NUM,((float)correct_cnt/TEST_NUM)*100,(avg_error/TEST_NUM)*100);
if((float)correct_cnt/TEST_NUM*100>max_acc)
{
max_acc=(float)correct_cnt/TEST_NUM*100;
export_params();
printf("The new model has been exported. Accuracy has reached to %0.5f%%\n\n",max_acc);
}
else
{
alpha=alpha-(alpha/3);
printf("Learning rate has been reduced to %f\n\n",alpha);
}
}
for(int i=0;i<n_stream;i++)
hipStreamDestroy(stream[i]);
free(stream);
}
else
{
printf("The parameter can only be GPU or CPU!\n");
return 0;
}
return 0;
} | f0bf16d112119873f2e3a344ae09f23a830fe537.cu | #include "stdio.h"
#include "io.cuh"
#include "fp.cuh"
#include "bp.cuh"
#include "init.cuh"
#include "utils.cuh"
#include "global.cuh"
#include "fp_gpu.cuh"
#include "bp_gpu.cuh"
#include "global_gpu.cuh"
float max_acc;
clock_t t;
int main(int argc,char *argv[])
{
printf("====== aininot260 [email protected] ======\n");
printf(" Processor used : %s\n",argv[1]);
printf(" Learning rate : %.2f\n",alpha);
printf(" Epochs : %d\n",epochs);
printf(" Batch size : %d\n",minibatch);
printf("========================================\n");
printf("\n");
load_data();
init_params();
if(strcmp(argv[1],"CPU")==0)
{
for(int i=1;i<=epochs;i++)
{
t=clock();
correct_cnt=0;
avg_error=0;
for(int j=0;j<TRAIN_NUM;j++)
{
set_input(j,train_image);
input_conv();
conv_pool();
pool_fc1();
fc1_fc2();
set_answer(j,train_label);
check_answer(correct_cnt);
get_error(avg_error);
update_fc2_b();
update_fc2_w();
update_fc1_b();
update_fc1_w();
update_conv_b();
update_conv_w();
if((j+1)%minibatch==0)
assign_grads();
}
printf("Training Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% Epoch : %d \n",floor(((float)(clock()-t))/CLOCKS_PER_SEC),TRAIN_NUM,((float)correct_cnt/TRAIN_NUM)*100,(avg_error/TRAIN_NUM)*100,i);
correct_cnt=0;
avg_error=0;
for(int j=0;j<TEST_NUM;j++)
{
set_input(j,test_image);
input_conv();
conv_pool();
pool_fc1();
fc1_fc2();
set_answer(j,test_label);
check_answer(correct_cnt);
get_error(avg_error);
}
printf("Testing Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% \n",floor(((float)(clock()-t))/CLOCKS_PER_SEC),TEST_NUM,((float)correct_cnt/TEST_NUM)*100,(avg_error/TEST_NUM)*100);
if((float)correct_cnt/TEST_NUM*100>max_acc)
{
max_acc=(float)correct_cnt/TEST_NUM*100;
export_params();
printf("The new model has been exported. Accuracy has reached to %0.5f%%\n\n",max_acc);
}
else
{
alpha=alpha-(alpha/3);
printf("Learning rate has been reduced to %f\n\n",alpha);
}
}
}
else if(strcmp(argv[1],"GPU")==0)
{
initDevice(0);
int n_stream=N_STREAM;
CHECK(cudaMemcpyToSymbol(_minibatch,&minibatch,sizeof(int)));
stream=(cudaStream_t*)malloc(n_stream*sizeof(cudaStream_t));
for(int i=0;i<n_stream;i++)
cudaStreamCreateWithFlags(&stream[i],cudaStreamNonBlocking);
// cudaStreamCreate(&stream[i]);
for(int i=1;i<=epochs;i++)
{
t=clock();
correct_cnt=0;
avg_error=0;
for(int j=0;j<TRAIN_NUM;j++)
{
fp_conv_pool_gpu(j,1);
fp_fc_answer_gpu(j,1);
bp_update_gpu(j);
if((j+1)%minibatch==0)
bp_assign_grads_gpu(j);
}
cudaDeviceSynchronize();
printf("Training Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% Epoch : %d \n",floor(((float)(clock()-t))/CLOCKS_PER_SEC),TRAIN_NUM,((float)correct_cnt/TRAIN_NUM)*100,(avg_error/TRAIN_NUM)*100,i);
correct_cnt=0;
avg_error=0;
for(int j=0;j<TEST_NUM;j++)
{
fp_conv_pool_gpu(j,0);
fp_fc_answer_gpu(j,0);
}
cudaDeviceSynchronize();
printf("Testing Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% \n",floor(((float)(clock()-t))/CLOCKS_PER_SEC),TEST_NUM,((float)correct_cnt/TEST_NUM)*100,(avg_error/TEST_NUM)*100);
if((float)correct_cnt/TEST_NUM*100>max_acc)
{
max_acc=(float)correct_cnt/TEST_NUM*100;
export_params();
printf("The new model has been exported. Accuracy has reached to %0.5f%%\n\n",max_acc);
}
else
{
alpha=alpha-(alpha/3);
printf("Learning rate has been reduced to %f\n\n",alpha);
}
}
for(int i=0;i<n_stream;i++)
cudaStreamDestroy(stream[i]);
free(stream);
}
else
{
printf("The parameter can only be GPU or CPU!\n");
return 0;
}
return 0;
} |
e249d5d29c8d5a1126cc3514cc7b39188ed1ceaf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <vector>
#include <stdio.h>
#include <algorithm>
#include <time.h>
using namespace std;
void readInt(int &n, int &m) {
ifstream fin_n("data/nums.txt");
fin_n >> n >> m;
}
void readGraph(unsigned long long *neib, int n, int m) {
ifstream fin_g("data/graph.txt");
vector<vector<int> > vert;
vert.resize(n);
for (int i = 0; i < m; ++i) {
int u, v;
fin_g >> u >> v;
u--, v--;
neib[i] = ((unsigned long long)u << 32) + v;
}
}
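// Each edge is packed into one 64-bit word: u in the high 32 bits, v in the low
// 32 bits. The kernels below unpack with the names swapped (u = low, v = high),
// which is harmless here because the union operation is symmetric in u and v.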
__global__ void select_winner_odd(int *parent, unsigned long long *edge_list, int *mark, int *flag, int e_num) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < e_num) {
unsigned long long temp = edge_list[tid];
int u, v;
u = temp & 0xffffffff;
v = temp >> 32;
if (parent[u] != parent[v]) {
parent[max(parent[u], parent[v])] = parent[min(parent[u], parent[v])];
*flag = 1;
} else {
mark[tid] = 1;
}
}
}
__global__ void select_winner_even(int *parent, unsigned long long *edge_list, int *mark, int *flag, int e_num) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < e_num) {
unsigned long long temp = edge_list[tid];
unsigned int u, v;
u = temp & 0xffffffff;
v = (temp >> 32) & 0xffffffff;
if (parent[u] != parent[v]) {
parent[min(parent[u], parent[v])] = parent[max(parent[u], parent[v])];
*flag = 1;
} else {
mark[tid] = 1;
}
}
}
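// The two select_winner variants hook roots in opposite directions (larger parent
// onto smaller vs. smaller onto larger); main() alternates between them on
// successive outer iterations via the `count` flag, presumably to keep the
// resulting trees balanced.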
__global__ void jump(int *parent, int v_num, int *flag) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < v_num) {
int p = parent[tid];
int p_p = parent[p];
if (p != p_p) {
parent[tid] = p_p;
(*flag) = 1;
}
}
}
int main() {
int n, m;
readInt(n, m);
unsigned long long *h_edge_list, *d_edge_list;
h_edge_list = (unsigned long long*)malloc(m * sizeof(unsigned long long));
readGraph(h_edge_list, n, m);
int h_parent[n], *d_parent;
int h_mark[m], *d_mark;
for (int i = 0; i < n; ++i) {
h_parent[i] = i;
}
for (int i = 0; i < m; ++i) {
h_mark[i] = 0;
}
int flag[1], *d_flag;
int count = 0;
clock_t beg = clock();
do {
flag[0] = 0;
hipMalloc(&d_parent, n * sizeof(int));
hipMalloc(&d_edge_list, m * sizeof(unsigned long long));
hipMalloc(&d_mark, m * sizeof(int));
hipMalloc(&d_flag, sizeof(int));
hipMemcpy(d_parent, h_parent, n * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_edge_list, h_edge_list, m * sizeof(unsigned long long), hipMemcpyHostToDevice);
hipMemcpy(d_mark, h_mark, m * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_flag, flag, sizeof(int), hipMemcpyHostToDevice);
if (count) {
hipLaunchKernelGGL(( select_winner_odd), dim3(256), dim3(256), 0, 0, d_parent, d_edge_list, d_mark, d_flag, m);
} else {
hipLaunchKernelGGL(( select_winner_even), dim3(256), dim3(256), 0, 0, d_parent, d_edge_list, d_mark, d_flag, m);
}
hipDeviceSynchronize();
hipMemcpy(flag, d_flag, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(h_parent, d_parent, n * sizeof(int), hipMemcpyDeviceToHost);
        hipFree(d_parent);
        hipFree(d_edge_list);
        hipFree(d_mark);
        hipFree(d_flag);
if (!flag[0]) {
break;
}
count ^= 1;
do {
flag[0] = 0;
hipMalloc(&d_flag, sizeof(int));
hipMalloc(&d_parent, n * sizeof(int));
hipMemcpy(d_flag, flag, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_parent, h_parent, n * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( jump), dim3(256), dim3(256), 0, 0, d_parent, n, d_flag);
hipDeviceSynchronize();
hipMemcpy(flag, d_flag, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(h_parent, d_parent, n * sizeof(int), hipMemcpyDeviceToHost);
            hipFree(d_flag);
            hipFree(d_parent);
} while(flag[0]);
} while(flag);
cout << float(clock() - beg) / CLOCKS_PER_SEC << endl;
sort(h_parent, h_parent + n);
cout << unique(h_parent, h_parent + n) - h_parent;
}
| e249d5d29c8d5a1126cc3514cc7b39188ed1ceaf.cu | #include <iostream>
#include <fstream>
#include <vector>
#include <stdio.h>
#include <algorithm>
#include <time.h>
using namespace std;
void readInt(int &n, int &m) {
ifstream fin_n("data/nums.txt");
fin_n >> n >> m;
}
void readGraph(unsigned long long *neib, int n, int m) {
ifstream fin_g("data/graph.txt");
vector<vector<int> > vert;
vert.resize(n);
for (int i = 0; i < m; ++i) {
int u, v;
fin_g >> u >> v;
u--, v--;
neib[i] = ((unsigned long long)u << 32) + v;
}
}
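// Each edge is packed into one 64-bit word: u in the high 32 bits, v in the low
// 32 bits. The kernels below unpack with the names swapped (u = low, v = high),
// which is harmless here because the union operation is symmetric in u and v.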
__global__ void select_winner_odd(int *parent, unsigned long long *edge_list, int *mark, int *flag, int e_num) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < e_num) {
unsigned long long temp = edge_list[tid];
int u, v;
u = temp & 0xffffffff;
v = temp >> 32;
if (parent[u] != parent[v]) {
parent[max(parent[u], parent[v])] = parent[min(parent[u], parent[v])];
*flag = 1;
} else {
mark[tid] = 1;
}
}
}
__global__ void select_winner_even(int *parent, unsigned long long *edge_list, int *mark, int *flag, int e_num) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < e_num) {
unsigned long long temp = edge_list[tid];
unsigned int u, v;
u = temp & 0xffffffff;
v = (temp >> 32) & 0xffffffff;
if (parent[u] != parent[v]) {
parent[min(parent[u], parent[v])] = parent[max(parent[u], parent[v])];
*flag = 1;
} else {
mark[tid] = 1;
}
}
}
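// The two select_winner variants hook roots in opposite directions (larger parent
// onto smaller vs. smaller onto larger); main() alternates between them on
// successive outer iterations via the `count` flag, presumably to keep the
// resulting trees balanced.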
__global__ void jump(int *parent, int v_num, int *flag) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < v_num) {
int p = parent[tid];
int p_p = parent[p];
if (p != p_p) {
parent[tid] = p_p;
(*flag) = 1;
}
}
}
int main() {
int n, m;
readInt(n, m);
unsigned long long *h_edge_list, *d_edge_list;
h_edge_list = (unsigned long long*)malloc(m * sizeof(unsigned long long));
readGraph(h_edge_list, n, m);
int h_parent[n], *d_parent;
int h_mark[m], *d_mark;
for (int i = 0; i < n; ++i) {
h_parent[i] = i;
}
for (int i = 0; i < m; ++i) {
h_mark[i] = 0;
}
int flag[1], *d_flag;
int count = 0;
clock_t beg = clock();
do {
flag[0] = 0;
cudaMalloc(&d_parent, n * sizeof(int));
cudaMalloc(&d_edge_list, m * sizeof(unsigned long long));
cudaMalloc(&d_mark, m * sizeof(int));
cudaMalloc(&d_flag, sizeof(int));
cudaMemcpy(d_parent, h_parent, n * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_edge_list, h_edge_list, m * sizeof(unsigned long long), cudaMemcpyHostToDevice);
cudaMemcpy(d_mark, h_mark, m * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_flag, flag, sizeof(int), cudaMemcpyHostToDevice);
if (count) {
select_winner_odd<<<256, 256>>>(d_parent, d_edge_list, d_mark, d_flag, m);
} else {
select_winner_even<<<256, 256>>>(d_parent, d_edge_list, d_mark, d_flag, m);
}
cudaThreadSynchronize();
cudaMemcpy(flag, d_flag, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(h_parent, d_parent, n * sizeof(int), cudaMemcpyDeviceToHost);
        cudaFree(d_parent);
        cudaFree(d_edge_list);
        cudaFree(d_mark);
        cudaFree(d_flag);
if (!flag[0]) {
break;
}
count ^= 1;
do {
flag[0] = 0;
cudaMalloc(&d_flag, sizeof(int));
cudaMalloc(&d_parent, n * sizeof(int));
cudaMemcpy(d_flag, flag, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_parent, h_parent, n * sizeof(int), cudaMemcpyHostToDevice);
jump<<<256, 256>>>(d_parent, n, d_flag);
cudaThreadSynchronize();
cudaMemcpy(flag, d_flag, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(h_parent, d_parent, n * sizeof(int), cudaMemcpyDeviceToHost);
            cudaFree(d_flag);
            cudaFree(d_parent);
} while(flag[0]);
} while(flag);
cout << float(clock() - beg) / CLOCKS_PER_SEC << endl;
sort(h_parent, h_parent + n);
cout << unique(h_parent, h_parent + n) - h_parent;
}
|
c8404b2daa6e2e294aec5bb247844491be294883.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- KBLAS (version 1.0) --
Ahmad Abdelfattah, Center of Extreme Computing
Hatem Ltaief, Supercomputing Laboratory
David Keyes, Center of Extreme Computing
King Abdullah University of Science and Technology (KAUST)
June 2013
KBLAS is a subset of BLAS routines highly optimized for NVIDIA GPUs
*/
/**
-- Center of Extreme Computing and Supercomputing Laboratory
-- Division of Applied Mathematics and Computational Science
-- King Abdullah University of Science and Technology
-- (C) Copyright 2013
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the University of Tennessee, Knoxville nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <rocblas.h>
#include "gemv_core.cuh"
#if (SM >= 30)
#define cgemvn_bs (32)
#define cgemvn_ty (16)
#define cgemvn_by (2)
#define cgemvt_bs (32)
#define cgemvt_ty (16)
#define cgemvt_by (2)
#else
#define cgemvn_bs (32)
#define cgemvn_ty (4)
#define cgemvn_by (2)
#define cgemvt_bs (32)
#define cgemvt_ty (4)
#define cgemvt_by (2)
#endif
extern "C"
int kblas_cscal_async(int n, cuFloatComplex alpha, cuFloatComplex *x, int incx, hipStream_t stream);
int kblas_cgemv_driver( char trans, int rows, int cols,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy,
hipStream_t stream)
{
if(trans == 'n' || trans == 'N')
{
// scaling with beta
kblas_cscal_async(rows, beta, dY, incy, stream);
int mod_r = rows % cgemvn_bs;
int mod_c = cols % cgemvn_bs;
if(mod_r == 0)
{
if(mod_c == 0)
{
// special case
int blocks = rows/cgemvn_bs;
const int thread_x = cgemvn_bs;
const int thread_y = cgemvn_ty;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, cgemvn_by);
const int elements_per_thread = thread_x/(2*thread_y);
hipLaunchKernelGGL(( gemvn_special<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy);
}
else
{
// generic case for columns only
int blocks = rows/cgemvn_bs;
blocks += 1; // dummy thread block
const int thread_x = cgemvn_bs;
const int thread_y = cgemvn_ty;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, cgemvn_by);
const int elements_per_thread = thread_x/(2*thread_y);
const int irregular_cols = mod_c % elements_per_thread;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
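				/* Worked example (values follow from the constants defined above): with the
				   SM >= 30 configuration cgemvn_bs = 32 and cgemvn_ty = 16,
				   elements_per_thread = 32/(2*16) = 1, so irregular_cols = mod_c % 1 = 0 and
				   only case 0 is reachable; with cgemvn_ty = 4, elements_per_thread = 4 and
				   cases 0-3 are reachable. The remaining cases allow for other configurations. */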
case 0:hipLaunchKernelGGL(( gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 1:hipLaunchKernelGGL(( gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 2:hipLaunchKernelGGL(( gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 3:hipLaunchKernelGGL(( gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 4:hipLaunchKernelGGL(( gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 5:hipLaunchKernelGGL(( gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 6:hipLaunchKernelGGL(( gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 7:hipLaunchKernelGGL(( gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 8:hipLaunchKernelGGL(( gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
default: printf("CGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else // mod_r != 0
{
if(mod_c == 0)
{
// generic case for columns only
int blocks = (rows/cgemvn_bs) + (mod_r != 0);
const int thread_x = cgemvn_bs;
const int thread_y = cgemvn_ty;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, cgemvn_by);
const int elements_per_thread = thread_x/(2*thread_y);
hipLaunchKernelGGL(( gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c);
}
else
{
// generic case for rows and cols
int blocks = (rows/cgemvn_bs) + (mod_r != 0);
const int thread_x = cgemvn_bs;
const int thread_y = cgemvn_ty;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, cgemvn_by);
const int elements_per_thread = thread_x/(2*thread_y);
const int irregular_cols = mod_c % elements_per_thread;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0:hipLaunchKernelGGL(( gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 1:hipLaunchKernelGGL(( gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 2:hipLaunchKernelGGL(( gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 3:hipLaunchKernelGGL(( gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 4:hipLaunchKernelGGL(( gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 5:hipLaunchKernelGGL(( gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 6:hipLaunchKernelGGL(( gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 7:hipLaunchKernelGGL(( gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 8:hipLaunchKernelGGL(( gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
default: printf("CGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
} // end of non-transpose case
else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
{
int conj;
if(trans == 'c' || trans == 'C') conj = 1;
else conj = 0;
// scaling with beta
kblas_cscal_async(cols, beta, dY, incy, stream);
int mod_r = rows % cgemvt_bs;
int mod_c = cols % cgemvt_bs;
if(mod_c == 0)
{
if(mod_r == 0)
{
// special case
int blocks = cols/cgemvt_bs;
const int thread_x = cgemvt_bs;
const int thread_y = cgemvt_ty;
const int elements_per_thread = thread_x/(2*thread_y);
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, cgemvt_by);
hipLaunchKernelGGL(( gemvt_special<cuFloatComplex, cgemvt_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, conj);
}
else
{
// mod_r != 0
int blocks = cols/cgemvt_bs;
blocks += 1; // dummy thread block
const int thread_x = cgemvt_bs;
const int thread_y = cgemvt_ty;
const int elements_per_thread = thread_x/(2*thread_y);
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, cgemvt_by);
hipLaunchKernelGGL(( gemvt_generic<cuFloatComplex, cgemvt_bs, thread_x, thread_y, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, conj);
}
}
else // mod_c != 0
{
int blocks = cols/cgemvt_bs + (mod_c != 0);
const int thread_x = cgemvt_bs;
const int thread_y = cgemvt_ty;
const int elements_per_thread = thread_x/(2*thread_y);
const int irregular_cols = mod_c % elements_per_thread;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, cgemvt_by);
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0:hipLaunchKernelGGL(( gemvt_generic<cuFloatComplex, cgemvt_bs, thread_x, thread_y, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, conj); break;
case 1:hipLaunchKernelGGL(( gemvt_generic<cuFloatComplex, cgemvt_bs, thread_x, thread_y, elements_per_thread, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, conj); break;
case 2:hipLaunchKernelGGL(( gemvt_generic<cuFloatComplex, cgemvt_bs, thread_x, thread_y, elements_per_thread, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, conj); break;
case 3:hipLaunchKernelGGL(( gemvt_generic<cuFloatComplex, cgemvt_bs, thread_x, thread_y, elements_per_thread, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, conj); break;
case 4:hipLaunchKernelGGL(( gemvt_generic<cuFloatComplex, cgemvt_bs, thread_x, thread_y, elements_per_thread, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, conj); break;
case 5:hipLaunchKernelGGL(( gemvt_generic<cuFloatComplex, cgemvt_bs, thread_x, thread_y, elements_per_thread, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, conj); break;
case 6:hipLaunchKernelGGL(( gemvt_generic<cuFloatComplex, cgemvt_bs, thread_x, thread_y, elements_per_thread, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, conj); break;
case 7:hipLaunchKernelGGL(( gemvt_generic<cuFloatComplex, cgemvt_bs, thread_x, thread_y, elements_per_thread, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, conj); break;
case 8:hipLaunchKernelGGL(( gemvt_generic<cuFloatComplex, cgemvt_bs, thread_x, thread_y, elements_per_thread, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, conj); break;
default: printf("CGEMV-T error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else
{
printf("CGEMV error: Unrecognized transpose mode %c \n", trans);
return -1;
}
return 0;
}
extern "C"
int kblas_cgemv( char trans, int rows, int cols,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy)
{
return kblas_cgemv_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, 0);
}
extern "C"
int kblas_cgemv_async( char trans, int rows, int cols,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy,
hipStream_t stream)
{
return kblas_cgemv_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, stream);
} | c8404b2daa6e2e294aec5bb247844491be294883.cu | /*
-- KBLAS (version 1.0) --
Ahmad Abdelfattah, Center of Extreme Computing
Hatem Ltaief, Supercomputing Laboratory
David Keyes, Center of Extreme Computing
King Abdullah University of Science and Technology (KAUST)
June 2013
KBLAS is a subset of BLAS routines highly optimized for NVIDIA GPUs
*/
/**
-- Center of Extreme Computing and Supercomputing Laboratory
-- Division of Applied Mathematics and Computational Science
-- King Abdullah University of Science and Technology
-- (C) Copyright 2013
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the University of Tennessee, Knoxville nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**/
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cublas.h>
#include "gemv_core.cuh"
#if (SM >= 30)
#define cgemvn_bs (32)
#define cgemvn_ty (16)
#define cgemvn_by (2)
#define cgemvt_bs (32)
#define cgemvt_ty (16)
#define cgemvt_by (2)
#else
#define cgemvn_bs (32)
#define cgemvn_ty (4)
#define cgemvn_by (2)
#define cgemvt_bs (32)
#define cgemvt_ty (4)
#define cgemvt_by (2)
#endif
extern "C"
int kblas_cscal_async(int n, cuFloatComplex alpha, cuFloatComplex *x, int incx, cudaStream_t stream);
int kblas_cgemv_driver( char trans, int rows, int cols,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy,
cudaStream_t stream)
{
if(trans == 'n' || trans == 'N')
{
// scaling with beta
kblas_cscal_async(rows, beta, dY, incy, stream);
int mod_r = rows % cgemvn_bs;
int mod_c = cols % cgemvn_bs;
if(mod_r == 0)
{
if(mod_c == 0)
{
// special case
int blocks = rows/cgemvn_bs;
const int thread_x = cgemvn_bs;
const int thread_y = cgemvn_ty;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, cgemvn_by);
const int elements_per_thread = thread_x/(2*thread_y);
gemvn_special<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy);
}
else
{
// generic case for columns only
int blocks = rows/cgemvn_bs;
blocks += 1; // dummy thread block
const int thread_x = cgemvn_bs;
const int thread_y = cgemvn_ty;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, cgemvn_by);
const int elements_per_thread = thread_x/(2*thread_y);
const int irregular_cols = mod_c % elements_per_thread;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
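				/* Worked example (values follow from the constants defined above): with the
				   SM >= 30 configuration cgemvn_bs = 32 and cgemvn_ty = 16,
				   elements_per_thread = 32/(2*16) = 1, so irregular_cols = mod_c % 1 = 0 and
				   only case 0 is reachable; with cgemvn_ty = 4, elements_per_thread = 4 and
				   cases 0-3 are reachable. The remaining cases allow for other configurations. */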
case 0: gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 1: gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 2: gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 3: gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 4: gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 5: gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 6: gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 7: gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 8: gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
default: printf("CGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else // mod_r != 0
{
if(mod_c == 0)
{
// generic case for columns only
int blocks = (rows/cgemvn_bs) + (mod_r != 0);
const int thread_x = cgemvn_bs;
const int thread_y = cgemvn_ty;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, cgemvn_by);
const int elements_per_thread = thread_x/(2*thread_y);
gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c);
}
else
{
// generic case for rows and cols
int blocks = (rows/cgemvn_bs) + (mod_r != 0);
const int thread_x = cgemvn_bs;
const int thread_y = cgemvn_ty;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, cgemvn_by);
const int elements_per_thread = thread_x/(2*thread_y);
const int irregular_cols = mod_c % elements_per_thread;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0: gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 1: gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 2: gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 3: gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 4: gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 5: gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 6: gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 7: gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
case 8: gemvn_generic<cuFloatComplex, cgemvn_bs, cgemvn_bs, cgemvn_ty, elements_per_thread, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c); break;
default: printf("CGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
} // end of non-transpose case
else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
{
int conj;
if(trans == 'c' || trans == 'C') conj = 1;
else conj = 0;
// scaling with beta
kblas_cscal_async(cols, beta, dY, incy, stream);
int mod_r = rows % cgemvt_bs;
int mod_c = cols % cgemvt_bs;
if(mod_c == 0)
{
if(mod_r == 0)
{
// special case
int blocks = cols/cgemvt_bs;
const int thread_x = cgemvt_bs;
const int thread_y = cgemvt_ty;
const int elements_per_thread = thread_x/(2*thread_y);
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, cgemvt_by);
gemvt_special<cuFloatComplex, cgemvt_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, conj);
}
else
{
// mod_r != 0
int blocks = cols/cgemvt_bs;
blocks += 1; // dummy thread block
const int thread_x = cgemvt_bs;
const int thread_y = cgemvt_ty;
const int elements_per_thread = thread_x/(2*thread_y);
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, cgemvt_by);
gemvt_generic<cuFloatComplex, cgemvt_bs, thread_x, thread_y, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, conj);
}
}
else // mod_c != 0
{
int blocks = cols/cgemvt_bs + (mod_c != 0);
const int thread_x = cgemvt_bs;
const int thread_y = cgemvt_ty;
const int elements_per_thread = thread_x/(2*thread_y);
const int irregular_cols = mod_c % elements_per_thread;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, cgemvt_by);
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0: gemvt_generic<cuFloatComplex, cgemvt_bs, thread_x, thread_y, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, conj); break;
case 1: gemvt_generic<cuFloatComplex, cgemvt_bs, thread_x, thread_y, elements_per_thread, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, conj); break;
case 2: gemvt_generic<cuFloatComplex, cgemvt_bs, thread_x, thread_y, elements_per_thread, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, conj); break;
case 3: gemvt_generic<cuFloatComplex, cgemvt_bs, thread_x, thread_y, elements_per_thread, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, conj); break;
case 4: gemvt_generic<cuFloatComplex, cgemvt_bs, thread_x, thread_y, elements_per_thread, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, conj); break;
case 5: gemvt_generic<cuFloatComplex, cgemvt_bs, thread_x, thread_y, elements_per_thread, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, conj); break;
case 6: gemvt_generic<cuFloatComplex, cgemvt_bs, thread_x, thread_y, elements_per_thread, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, conj); break;
case 7: gemvt_generic<cuFloatComplex, cgemvt_bs, thread_x, thread_y, elements_per_thread, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, conj); break;
case 8: gemvt_generic<cuFloatComplex, cgemvt_bs, thread_x, thread_y, elements_per_thread, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, conj); break;
default: printf("CGEMV-T error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else
{
printf("CGEMV error: Unrecognized transpose mode %c \n", trans);
return -1;
}
return 0;
}
extern "C"
int kblas_cgemv( char trans, int rows, int cols,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy)
{
return kblas_cgemv_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, 0);
}
extern "C"
int kblas_cgemv_async( char trans, int rows, int cols,
cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy,
cudaStream_t stream)
{
return kblas_cgemv_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, stream);
} |
d1904585a9f2e942e8c838d70a5058361c57bf6c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Linear.h"
#include <hip/hip_cooperative_groups.h>
#include <cooperative_groups/memcpy_async.h>
#include <cooperative_groups/reduce.h>
#include <mma.hpp>
namespace cg = cooperative_groups;
/*
 * Returns a pointer to the top-left corner of a given block in a matrix.
 * Assumes the matrix is stored in a row-major array.
 * It needs the number of columns of the matrix (the leading dimension).
*/
template <typename T, int SIZE = 16>
__device__ T *get_blk_start(T *data, const int row_blk, const int col_blk,
const int stride) {
auto res = &data[row_blk * SIZE * stride + SIZE * col_blk];
return res;
}
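/* Worked example of the indexing (hypothetical values): for a row-major half
   matrix with stride (leading dimension) 128, get_blk_start(data, 2, 3, 128)
   returns &data[2*16*128 + 3*16] = &data[4144], i.e. the first element of the
   16x16 tile at block coordinates (row 2, column 3). */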
// A*B = C, where A is a sparse matrix stored as 16x16 tiles in a CSR-style block
// format (row_ptr / row_offset / data) and B, C are dense; each warp computes one
// 16x16 tile of C with WMMA and adds the bias before storing the tile column-major.
__global__ void __kernel_blk_mmul_blk_bias_smem(
const int *__restrict__ A_row_ptr, const int *__restrict__ A_row_offset,
const half *__restrict__ A_data, const int A_blk_row_num,
const half *__restrict__ B, half *__restrict__ C,
const half *__restrict__ bias, const int out_row_blk_num,
const int in_col_blk_num) {
    // dynamic shared memory: one 16x16 half tile per warp (num_warp * 256 half values),
    // used as a per-warp temporary block
extern __shared__ half smem[];
auto grid = cg::this_grid();
auto cta = cg::this_thread_block();
auto warp = cg::tiled_partition<32>(cta);
auto tile_temp = &smem[warp.meta_group_rank() * 256];
const auto gwarp_id = grid.thread_rank() >> 5;
const auto total_warp = grid.size() / 32;
const auto total_tile = A_blk_row_num * out_row_blk_num;
for (int t = gwarp_id; t < total_tile; t += total_warp) {
// const auto out_col_blk_num = A_blk_row_num;
// out_row and out_col are untransposed positions
const auto out_row = gwarp_id / A_blk_row_num;
const auto out_col = gwarp_id % A_blk_row_num;
using frag_t = culib::mma::mma_t<16, 16, 16>;
frag_t::a_t<wmma::row_major> a;
frag_t::b_t<wmma::col_major> b;
frag_t::c_t<half> c;
wmma::fill_fragment(c, 0);
for (auto i = A_row_ptr[out_col]; i < A_row_ptr[out_col + 1]; i++) {
wmma::load_matrix_sync(a, &A_data[i * 256], 16);
const half *src =
get_blk_start(B, out_row, A_row_offset[i], in_col_blk_num * 16);
wmma::load_matrix_sync(b, src, in_col_blk_num * 16);
wmma::mma_sync(c, a, b, c);
}
wmma::store_matrix_sync(tile_temp, c, 16, wmma::mem_col_major);
const auto bias_value = bias[out_col * 16 + warp.thread_rank() % 16];
#pragma unroll 8
for (int i = warp.thread_rank(); i < 256; i += warp.size()) {
tile_temp[i] += bias_value;
}
const auto dst = get_blk_start(C, out_row, out_col, A_blk_row_num * 16);
wmma::load_matrix_sync(c, tile_temp, 16, wmma::mem_col_major);
wmma::store_matrix_sync(dst, c, A_blk_row_num * 16,
wmma::mem_col_major);
}
}
Linear<tile_mat>::Linear(int _in_size, int _out_size, const tile_mat &w,
const half *b, int _size)
: weight(w), bias(b, _out_size), in_size(_in_size), out_size(_out_size),
size(_size) {}
Linear<tile_mat>::Linear(int _in_size, int _out_size, const tile_mat &w,
culib::CUDA_ptr<half> &b, int _size)
: weight(w), bias(b), in_size(_in_size), out_size(_out_size),
size(_size) {}
Linear<tile_mat>::Linear(int _in_size, int _out_size, tile_mat &&w,
const half *b, int _size)
: weight(std::move(w)), bias(b, _out_size), in_size(_in_size),
out_size(_out_size), size(_size) {}
Linear<tile_mat>::Linear(Linear<tile_mat> &&_linear)
: in_size(_linear.in_size), out_size(_linear.out_size), size(_linear.size),
weight(std::move(_linear.weight)), bias(std::move(_linear.bias)) {}
template <typename _Tg>
__device__ void clear_smem(const _Tg &group, void *smem, const int N) {
auto ptr = reinterpret_cast<int *>(smem);
#pragma unroll
for (int i = group.thread_rank(); i < N / sizeof(int); i += group.size()) {
ptr[i] = 0;
}
group.sync();
}
// A*B = C: one thread block per 16x16 output tile (blockIdx.x = column tile,
// blockIdx.y = row tile); the block's warps split the nonzero tiles of A, the
// partial products are reduced in shared memory, then warp 0 adds the bias.
template <int num_thd>
__global__ void __kernel_blk_mmul_blk_bias_smem_blk(
const int *__restrict__ A_row_ptr, const int *__restrict__ A_row_offset,
const half *__restrict__ A_data, const int A_blk_row_num,
const half *__restrict__ B, half *__restrict__ C,
const half *__restrict__ bias, const int out_row_blk_num,
const int in_col_blk_num) {
    // dynamic shared memory: one 16x16 half tile per warp (num_warp * 256 half values),
    // used as a per-warp temporary block
extern __shared__ half smem[];
auto cta = cg::this_thread_block();
auto warp = cg::tiled_partition<32>(cta);
auto tile_temp = &smem[warp.meta_group_rank() * 256];
const auto out_row = blockIdx.y;
const auto out_col = blockIdx.x;
const auto warp_id = cta.thread_rank() >> 5;
constexpr auto num_warp = num_thd >> 5;
// clear_smem(cta, smem, num_warp * 256 * sizeof(half));
// const auto out_col_blk_num = A_blk_row_num;
// out_row and out_col are untransposed positions
using frag_t = culib::mma::mma_t<16, 16, 16>;
frag_t::a_t<wmma::row_major> a;
frag_t::b_t<wmma::col_major> b;
frag_t::c_t<half> c;
wmma::fill_fragment(c, 0);
const auto ldm = in_col_blk_num << 4;
#pragma unroll
for (auto i = A_row_ptr[out_col] + warp_id; i < A_row_ptr[out_col + 1];
i += num_warp) {
wmma::load_matrix_sync(a, &A_data[i << 8], 16);
auto src = &B[(out_row << 4) * ldm + (A_row_offset[i] << 4)];
wmma::load_matrix_sync(b, src, ldm);
wmma::mma_sync(c, a, b, c);
}
wmma::store_matrix_sync(tile_temp, c, 16, wmma::mem_col_major);
cta.sync();
// reduce across warp
int e = cta.thread_rank();
auto base = &smem[e];
auto sum = *base;
#pragma unroll
for (int i = 0; i < num_warp; i++) {
sum += base[i << 8];
}
*base = sum;
cta.sync();
// add bias
if (warp_id == 0) {
const auto bias_value = bias[(out_col << 4) + warp.thread_rank() % 16];
#pragma unroll 8
for (int i = warp.thread_rank(); i < 256; i += warp.size()) {
tile_temp[i] += bias_value;
}
const auto dst = get_blk_start(C, out_row, out_col, A_blk_row_num << 4);
wmma::load_matrix_sync(c, tile_temp, 16, wmma::mem_col_major);
wmma::store_matrix_sync(dst, c, A_blk_row_num << 4,
wmma::mem_col_major);
}
}
// void Linear<tile_mat>::forward(half *output, const half *const input,
// hipStream_t stream) {
// constexpr int num_thd = 256;
// auto smem_size = [=](int n) { return sizeof(half) * ((n / 32) * 256); };
// __kernel_blk_mmul_blk_bias_smem_blk<num_thd>
// <<<dim3(out_size / 16, size / 16), num_thd, smem_size(num_thd),
// stream>>>(weight.row_ptr.get(), weight.row_offset.get(),
// weight.data.get(), weight.blk_row_num, input, output,
// bias.get(), size / 16, in_size / 16);
// }
// void Linear<tile_mat>::forward(half *output, const half *const input,
// hipStream_t stream) {
// int num_blk = 160, num_thd = 576;
// auto smem_size = [=](int n) { return sizeof(half) * ((n / 32) * 256); };
// hipOccupancyMaxPotentialBlockSizeVariableSMem(
// &num_blk, &num_thd, __kernel_blk_mmul_blk_bias_smem, smem_size);
// __kernel_blk_mmul_blk_bias_smem<<<num_blk, num_thd, smem_size(num_thd),
// stream>>>(
// weight.row_ptr.get(), weight.row_offset.get(), weight.data.get(),
// weight.blk_row_num, input, output, bias.get(), size / 16, in_size /
// 16);
// }
// A*B = C, outer-product formulation: A is stored as 16x16 tiles in block-CSC
// form and B is transposed; each thread block walks one block-column K of A and
// accumulates its contribution directly into C.
template <int num_thd>
__global__ void __kernel_blk_mmul_blk_bias_smem_blk_outproduct(
const int *__restrict__ A_row_ptr, const int *__restrict__ A_row_offset,
const half *__restrict__ A_data, const half *__restrict__ B,
const int in_col_blk_num, half *__restrict__ C,
const half *__restrict__ bias, const int ldB, const int ldC) {
    // dynamic shared memory: one 16x16 half tile per warp (num_warp * 256 half values),
    // used as a per-warp temporary block
extern __shared__ half smem[];
auto cta = cg::this_thread_block();
auto warp = cg::tiled_partition<32>(cta);
auto eight_thd = cg::tiled_partition<8>(warp);
const auto K = blockIdx.x;
const auto warp_id = cta.thread_rank() >> 5;
constexpr auto num_warp = num_thd / 32;
auto tile_temp = &smem[warp_id * 256];
using frag_t = culib::mma::mma_t<16, 16, 16>;
frag_t::a_t<wmma::row_major> a;
frag_t::b_t<wmma::col_major> b;
frag_t::c_t<half> c; // c is stored in col-major
wmma::fill_fragment(c, 0);
const auto start_idx = A_row_ptr[K];
const auto num_nz_tile = A_row_ptr[K + 1] - start_idx;
for (auto i = warp_id; i < num_nz_tile * in_col_blk_num; i += num_warp) {
const auto idx = (i % num_nz_tile) + start_idx;
const auto out_row = i / num_nz_tile;
const auto out_col = A_row_offset[idx];
wmma::load_matrix_sync(a, &A_data[idx << 8], 16);
auto src = get_blk_start(B, K, out_row, ldB);
wmma::load_matrix_sync(b, src, ldB);
wmma::mma_sync(c, a, b, c);
wmma::store_matrix_sync(tile_temp, c, 16, wmma::mem_col_major);
// FIXME add bias
const int eight_group_id = eight_thd.meta_group_rank();
const int eight_thd_id = eight_thd.thread_rank();
auto temp = &tile_temp[eight_group_id * 16];
auto dst = get_blk_start(C, out_row, out_col, ldC);
// for (int k = warp.thread_rank(); k < 256; k += 32) {
// auto r = k / 16, c = k % 16;
// atomicAdd(&dst[r * ldC + c], tile_temp[r * 16 + c]);
// }
auto dst2 = reinterpret_cast<half2 *>(dst);
for (int k = warp.thread_rank(); k < 128; k += 32) {
auto r = k / 8, c = k % 8;
half2 src{tile_temp[r * 16 + 2 * c], tile_temp[r * 16 + 2 * c + 1]};
dst2[r * ldC / 2 + c] += src;
// atomicAdd(&dst2[r * ldC / 2 + c], src);
}
}
}
// void Linear<tile_mat>::forward(half *output, const half *const input,
// hipStream_t stream) {
// constexpr int num_thd = 256;
// constexpr auto smem_size = sizeof(half) * ((num_thd / 32) * 256);
// assert(weight.row_ptr.size == (in_size / 16) + 1);
// __kernel_blk_mmul_blk_bias_smem_blk_outproduct<num_thd>
// <<<in_size / 16, num_thd, smem_size, stream>>>(
// weight.row_ptr.get(), weight.row_offset.get(), weight.data.get(),
// input, size / 16, output, bias.get(), size, out_size);
// }
static constexpr int FP16_skew = 16;
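// The 16-half (32-byte) skew pads the shared-memory leading dimension used by the
// kernel below, presumably so that column-major accesses to the staged 16x16 tiles
// do not all hit the same banks (a common bank-conflict-avoidance trick for WMMA
// staging buffers).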
// A*B = C: same per-tile-block scheme as __kernel_blk_mmul_blk_bias_smem_blk above,
// but the shared-memory staging buffer uses the skewed leading dimension.
template <int num_thd>
__global__ void __kernel_blk_mmul_blk_bias_smem_blk_skew(
const int *__restrict__ A_row_ptr, const int *__restrict__ A_row_offset,
const half *__restrict__ A_data, const int A_blk_row_num,
const half *__restrict__ B, half *__restrict__ C,
const half *__restrict__ bias, const int out_row_blk_num,
const int in_col_blk_num) {
    // dynamic shared memory: 16 * (num_warp * 16 + FP16_skew) half values; each warp's
    // 16x16 temporary tile is stored column-major with the skewed leading dimension
extern __shared__ half smem[];
auto cta = cg::this_thread_block();
auto warp = cg::tiled_partition<32>(cta);
constexpr auto num_warp = num_thd >> 5;
const auto warp_id = cta.thread_rank() >> 5;
auto tile_temp = &smem[warp_id * 16];
constexpr auto tile_ldm = num_warp * 16 + FP16_skew;
const auto out_row = blockIdx.y;
const auto out_col = blockIdx.x;
// clear_smem(cta, smem, num_warp * 256 * sizeof(half));
// const auto out_col_blk_num = A_blk_row_num;
// out_row and out_col are untransposed positions
using frag_t = culib::mma::mma_t<16, 16, 16>;
frag_t::a_t<wmma::row_major> a;
frag_t::b_t<wmma::col_major> b;
frag_t::c_t<half> c;
wmma::fill_fragment(c, 0);
const auto ldm = in_col_blk_num << 4;
#pragma unroll
for (auto i = A_row_ptr[out_col] + warp_id; i < A_row_ptr[out_col + 1];
i += num_warp) {
wmma::load_matrix_sync(a, &A_data[i << 8], 16);
auto src = &B[(out_row << 4) * ldm + (A_row_offset[i] << 4)];
wmma::load_matrix_sync(b, src, ldm);
wmma::mma_sync(c, a, b, c);
}
wmma::store_matrix_sync(tile_temp, c, tile_ldm, wmma::mem_col_major);
cta.sync();
// reduce across warp
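  // Each thread owns one element of the 16x16 output tile (one-to-one for the 256-thread
  // launch used below) and sums the per-warp partial tiles, which sit 16 half elements
  // apart in the skewed buffer.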
int e = cta.thread_rank();
  auto base = &smem[(e / 16) * tile_ldm + (e % 16)];
auto sum = *base;
#pragma unroll
for (int i = 1; i < num_warp; i++) {
sum += base[16*i];
}
*base = sum;
cta.sync();
// add bias
if (warp_id == 0) {
const auto sub_group_id = warp.thread_rank() % 2;
const auto sub_group_tid = warp.thread_rank() % 16;
const auto bias_value = bias[(out_col << 4) + sub_group_tid];
#pragma unroll 8
for (int r = sub_group_id; r < 16; r += 2) {
tile_temp[r * tile_ldm + sub_group_tid] += bias_value;
}
const auto dst = get_blk_start(C, out_row, out_col, A_blk_row_num << 4);
wmma::load_matrix_sync(c, tile_temp, tile_ldm, wmma::mem_col_major);
wmma::store_matrix_sync(dst, c, A_blk_row_num << 4,
wmma::mem_col_major);
}
}
void Linear<tile_mat>::forward(half *output, const half *const input,
hipStream_t stream) {
constexpr int num_thd = 256;
auto smem_size = [=](int n) {
const auto num_warp = n / 32;
return sizeof(half) * 16 * (num_warp * 16 + FP16_skew);
};
hipLaunchKernelGGL(( __kernel_blk_mmul_blk_bias_smem_blk_skew<num_thd>)
, dim3(dim3(out_size / 16, size / 16)), dim3(num_thd), smem_size(num_thd),
stream, weight.row_ptr.get(), weight.row_offset.get(),
weight.data.get(), weight.blk_row_num, input, output,
bias.get(), size / 16, in_size / 16);
} | d1904585a9f2e942e8c838d70a5058361c57bf6c.cu | #include "Linear.h"
#include <cooperative_groups.h>
#include <cooperative_groups/memcpy_async.h>
#include <cooperative_groups/reduce.h>
#include <mma.hpp>
namespace cg = cooperative_groups;
/*
* It returns the pointer of the top-left corner of give block in a matrix.
* Assume the matrix is stored in a row-major array.
* It needs the number of columns of the matrix (leading dimension).
*/
template <typename T, int SIZE = 16>
__device__ T *get_blk_start(T *data, const int row_blk, const int col_blk,
const int stride) {
auto res = &data[row_blk * SIZE * stride + SIZE * col_blk];
return res;
}
// A*B = C
__global__ void __kernel_blk_mmul_blk_bias_smem(
const int *__restrict__ A_row_ptr, const int *__restrict__ A_row_offset,
const half *__restrict__ A_data, const int A_blk_row_num,
const half *__restrict__ B, half *__restrict__ C,
const half *__restrict__ bias, const int out_row_blk_num,
const int in_col_blk_num) {
// num_warp * 256
// temp_blk
extern __shared__ half smem[];
auto grid = cg::this_grid();
auto cta = cg::this_thread_block();
auto warp = cg::tiled_partition<32>(cta);
auto tile_temp = &smem[warp.meta_group_rank() * 256];
const auto gwarp_id = grid.thread_rank() >> 5;
const auto total_warp = grid.size() / 32;
const auto total_tile = A_blk_row_num * out_row_blk_num;
for (int t = gwarp_id; t < total_tile; t += total_warp) {
// const auto out_col_blk_num = A_blk_row_num;
// out_row and out_col are untransposed positions
const auto out_row = gwarp_id / A_blk_row_num;
const auto out_col = gwarp_id % A_blk_row_num;
using frag_t = culib::mma::mma_t<16, 16, 16>;
frag_t::a_t<wmma::row_major> a;
frag_t::b_t<wmma::col_major> b;
frag_t::c_t<half> c;
wmma::fill_fragment(c, 0);
for (auto i = A_row_ptr[out_col]; i < A_row_ptr[out_col + 1]; i++) {
wmma::load_matrix_sync(a, &A_data[i * 256], 16);
const half *src =
get_blk_start(B, out_row, A_row_offset[i], in_col_blk_num * 16);
wmma::load_matrix_sync(b, src, in_col_blk_num * 16);
wmma::mma_sync(c, a, b, c);
}
wmma::store_matrix_sync(tile_temp, c, 16, wmma::mem_col_major);
const auto bias_value = bias[out_col * 16 + warp.thread_rank() % 16];
#pragma unroll 8
for (int i = warp.thread_rank(); i < 256; i += warp.size()) {
tile_temp[i] += bias_value;
}
const auto dst = get_blk_start(C, out_row, out_col, A_blk_row_num * 16);
wmma::load_matrix_sync(c, tile_temp, 16, wmma::mem_col_major);
wmma::store_matrix_sync(dst, c, A_blk_row_num * 16,
wmma::mem_col_major);
}
}
Linear<tile_mat>::Linear(int _in_size, int _out_size, const tile_mat &w,
const half *b, int _size)
: weight(w), bias(b, _out_size), in_size(_in_size), out_size(_out_size),
size(_size) {}
Linear<tile_mat>::Linear(int _in_size, int _out_size, const tile_mat &w,
culib::CUDA_ptr<half> &b, int _size)
: weight(w), bias(b), in_size(_in_size), out_size(_out_size),
size(_size) {}
Linear<tile_mat>::Linear(int _in_size, int _out_size, tile_mat &&w,
const half *b, int _size)
: weight(std::move(w)), bias(b, _out_size), in_size(_in_size),
out_size(_out_size), size(_size) {}
Linear<tile_mat>::Linear(Linear<tile_mat> &&_linear)
: in_size(_linear.in_size), out_size(_linear.out_size), size(_linear.size),
weight(std::move(_linear.weight)), bias(std::move(_linear.bias)) {}
template <typename _Tg>
__device__ void clear_smem(const _Tg &group, void *smem, const int N) {
auto ptr = reinterpret_cast<int *>(smem);
#pragma unroll
for (int i = group.thread_rank(); i < N / sizeof(int); i += group.size()) {
ptr[i] = 0;
}
group.sync();
}
// A*B = C
template <int num_thd>
__global__ void __kernel_blk_mmul_blk_bias_smem_blk(
const int *__restrict__ A_row_ptr, const int *__restrict__ A_row_offset,
const half *__restrict__ A_data, const int A_blk_row_num,
const half *__restrict__ B, half *__restrict__ C,
const half *__restrict__ bias, const int out_row_blk_num,
const int in_col_blk_num) {
// num_warp * 256
// temp_blk
extern __shared__ half smem[];
auto cta = cg::this_thread_block();
auto warp = cg::tiled_partition<32>(cta);
auto tile_temp = &smem[warp.meta_group_rank() * 256];
const auto out_row = blockIdx.y;
const auto out_col = blockIdx.x;
const auto warp_id = cta.thread_rank() >> 5;
constexpr auto num_warp = num_thd >> 5;
// clear_smem(cta, smem, num_warp * 256 * sizeof(half));
// const auto out_col_blk_num = A_blk_row_num;
// out_row and out_col are untransposed positions
using frag_t = culib::mma::mma_t<16, 16, 16>;
frag_t::a_t<wmma::row_major> a;
frag_t::b_t<wmma::col_major> b;
frag_t::c_t<half> c;
wmma::fill_fragment(c, 0);
const auto ldm = in_col_blk_num << 4;
#pragma unroll
for (auto i = A_row_ptr[out_col] + warp_id; i < A_row_ptr[out_col + 1];
i += num_warp) {
wmma::load_matrix_sync(a, &A_data[i << 8], 16);
auto src = &B[(out_row << 4) * ldm + (A_row_offset[i] << 4)];
wmma::load_matrix_sync(b, src, ldm);
wmma::mma_sync(c, a, b, c);
}
wmma::store_matrix_sync(tile_temp, c, 16, wmma::mem_col_major);
cta.sync();
// reduce across warp
int e = cta.thread_rank();
auto base = &smem[e];
auto sum = *base;
#pragma unroll
for (int i = 0; i < num_warp; i++) {
sum += base[i << 8];
}
*base = sum;
cta.sync();
// add bias
if (warp_id == 0) {
const auto bias_value = bias[(out_col << 4) + warp.thread_rank() % 16];
#pragma unroll 8
for (int i = warp.thread_rank(); i < 256; i += warp.size()) {
tile_temp[i] += bias_value;
}
const auto dst = get_blk_start(C, out_row, out_col, A_blk_row_num << 4);
wmma::load_matrix_sync(c, tile_temp, 16, wmma::mem_col_major);
wmma::store_matrix_sync(dst, c, A_blk_row_num << 4,
wmma::mem_col_major);
}
}
// void Linear<tile_mat>::forward(half *output, const half *const input,
// cudaStream_t stream) {
// constexpr int num_thd = 256;
// auto smem_size = [=](int n) { return sizeof(half) * ((n / 32) * 256); };
// __kernel_blk_mmul_blk_bias_smem_blk<num_thd>
// <<<dim3(out_size / 16, size / 16), num_thd, smem_size(num_thd),
// stream>>>(weight.row_ptr.get(), weight.row_offset.get(),
// weight.data.get(), weight.blk_row_num, input, output,
// bias.get(), size / 16, in_size / 16);
// }
// void Linear<tile_mat>::forward(half *output, const half *const input,
// cudaStream_t stream) {
// int num_blk = 160, num_thd = 576;
// auto smem_size = [=](int n) { return sizeof(half) * ((n / 32) * 256); };
// cudaOccupancyMaxPotentialBlockSizeVariableSMem(
// &num_blk, &num_thd, __kernel_blk_mmul_blk_bias_smem, smem_size);
// __kernel_blk_mmul_blk_bias_smem<<<num_blk, num_thd, smem_size(num_thd),
// stream>>>(
// weight.row_ptr.get(), weight.row_offset.get(), weight.data.get(),
// weight.blk_row_num, input, output, bias.get(), size / 16, in_size /
// 16);
// }
// A*B = C
// A is CSC, B is transposed
template <int num_thd>
__global__ void __kernel_blk_mmul_blk_bias_smem_blk_outproduct(
const int *__restrict__ A_row_ptr, const int *__restrict__ A_row_offset,
const half *__restrict__ A_data, const half *__restrict__ B,
const int in_col_blk_num, half *__restrict__ C,
const half *__restrict__ bias, const int ldB, const int ldC) {
// num_warp * 256
// temp_blk
extern __shared__ half smem[];
auto cta = cg::this_thread_block();
auto warp = cg::tiled_partition<32>(cta);
auto eight_thd = cg::tiled_partition<8>(warp);
const auto K = blockIdx.x;
const auto warp_id = cta.thread_rank() >> 5;
constexpr auto num_warp = num_thd / 32;
auto tile_temp = &smem[warp_id * 256];
using frag_t = culib::mma::mma_t<16, 16, 16>;
frag_t::a_t<wmma::row_major> a;
frag_t::b_t<wmma::col_major> b;
frag_t::c_t<half> c; // c is stored in col-major
wmma::fill_fragment(c, 0);
const auto start_idx = A_row_ptr[K];
const auto num_nz_tile = A_row_ptr[K + 1] - start_idx;
for (auto i = warp_id; i < num_nz_tile * in_col_blk_num; i += num_warp) {
const auto idx = (i % num_nz_tile) + start_idx;
const auto out_row = i / num_nz_tile;
const auto out_col = A_row_offset[idx];
wmma::load_matrix_sync(a, &A_data[idx << 8], 16);
auto src = get_blk_start(B, K, out_row, ldB);
wmma::load_matrix_sync(b, src, ldB);
wmma::mma_sync(c, a, b, c);
wmma::store_matrix_sync(tile_temp, c, 16, wmma::mem_col_major);
// FIXME add bias
const int eight_group_id = eight_thd.meta_group_rank();
const int eight_thd_id = eight_thd.thread_rank();
auto temp = &tile_temp[eight_group_id * 16];
auto dst = get_blk_start(C, out_row, out_col, ldC);
// for (int k = warp.thread_rank(); k < 256; k += 32) {
// auto r = k / 16, c = k % 16;
// atomicAdd(&dst[r * ldC + c], tile_temp[r * 16 + c]);
// }
auto dst2 = reinterpret_cast<half2 *>(dst);
for (int k = warp.thread_rank(); k < 128; k += 32) {
auto r = k / 8, c = k % 8;
half2 src{tile_temp[r * 16 + 2 * c], tile_temp[r * 16 + 2 * c + 1]};
dst2[r * ldC / 2 + c] += src;
// atomicAdd(&dst2[r * ldC / 2 + c], src);
}
}
}
// void Linear<tile_mat>::forward(half *output, const half *const input,
// cudaStream_t stream) {
// constexpr int num_thd = 256;
// constexpr auto smem_size = sizeof(half) * ((num_thd / 32) * 256);
// assert(weight.row_ptr.size == (in_size / 16) + 1);
// __kernel_blk_mmul_blk_bias_smem_blk_outproduct<num_thd>
// <<<in_size / 16, num_thd, smem_size, stream>>>(
// weight.row_ptr.get(), weight.row_offset.get(), weight.data.get(),
// input, size / 16, output, bias.get(), size, out_size);
// }
static constexpr int FP16_skew = 16;
// A*B = C
template <int num_thd>
__global__ void __kernel_blk_mmul_blk_bias_smem_blk_skew(
const int *__restrict__ A_row_ptr, const int *__restrict__ A_row_offset,
const half *__restrict__ A_data, const int A_blk_row_num,
const half *__restrict__ B, half *__restrict__ C,
const half *__restrict__ bias, const int out_row_blk_num,
const int in_col_blk_num) {
// num_warp * 256
// temp_blk
extern __shared__ half smem[];
auto cta = cg::this_thread_block();
auto warp = cg::tiled_partition<32>(cta);
constexpr auto num_warp = num_thd >> 5;
const auto warp_id = cta.thread_rank() >> 5;
auto tile_temp = &smem[warp_id * 16];
constexpr auto tile_ldm = num_warp * 16 + FP16_skew;
const auto out_row = blockIdx.y;
const auto out_col = blockIdx.x;
// clear_smem(cta, smem, num_warp * 256 * sizeof(half));
// const auto out_col_blk_num = A_blk_row_num;
// out_row and out_col are untransposed positions
using frag_t = culib::mma::mma_t<16, 16, 16>;
frag_t::a_t<wmma::row_major> a;
frag_t::b_t<wmma::col_major> b;
frag_t::c_t<half> c;
wmma::fill_fragment(c, 0);
const auto ldm = in_col_blk_num << 4;
#pragma unroll
for (auto i = A_row_ptr[out_col] + warp_id; i < A_row_ptr[out_col + 1];
i += num_warp) {
wmma::load_matrix_sync(a, &A_data[i << 8], 16);
auto src = &B[(out_row << 4) * ldm + (A_row_offset[i] << 4)];
wmma::load_matrix_sync(b, src, ldm);
wmma::mma_sync(c, a, b, c);
}
wmma::store_matrix_sync(tile_temp, c, tile_ldm, wmma::mem_col_major);
cta.sync();
// reduce across warp
int e = cta.thread_rank();
  auto base = &smem[(e / 16) * tile_ldm + (e % 16)];
auto sum = *base;
#pragma unroll
for (int i = 1; i < num_warp; i++) {
sum += base[16*i];
}
*base = sum;
cta.sync();
// add bias
if (warp_id == 0) {
const auto sub_group_id = warp.thread_rank() % 2;
const auto sub_group_tid = warp.thread_rank() % 16;
const auto bias_value = bias[(out_col << 4) + sub_group_tid];
#pragma unroll 8
for (int r = sub_group_id; r < 16; r += 2) {
tile_temp[r * tile_ldm + sub_group_tid] += bias_value;
}
const auto dst = get_blk_start(C, out_row, out_col, A_blk_row_num << 4);
wmma::load_matrix_sync(c, tile_temp, tile_ldm, wmma::mem_col_major);
wmma::store_matrix_sync(dst, c, A_blk_row_num << 4,
wmma::mem_col_major);
}
}
void Linear<tile_mat>::forward(half *output, const half *const input,
cudaStream_t stream) {
constexpr int num_thd = 256;
auto smem_size = [=](int n) {
const auto num_warp = n / 32;
return sizeof(half) * 16 * (num_warp * 16 + FP16_skew);
};
__kernel_blk_mmul_blk_bias_smem_blk_skew<num_thd>
<<<dim3(out_size / 16, size / 16), num_thd, smem_size(num_thd),
stream>>>(weight.row_ptr.get(), weight.row_offset.get(),
weight.data.get(), weight.blk_row_num, input, output,
bias.get(), size / 16, in_size / 16);
} |
dcdbbc4a2807638d5f8c48f53fb10afa86ad290d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define SIZE 1024*1024*4
__global__ void reduce0(unsigned int *g_idata, unsigned int *g_odata, long size){
// dynamically allocated shared memory
extern __shared__ unsigned int sdata[];
// set up thread ids: within a block, and across the grid
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
//initialize the shared sum array
sdata[tid] = 0;
sdata[tid] = g_idata[i];
// synchronize: so that all threads within a block have loaded
// the elements from GDRAM to the shared mem
__syncthreads();
//for(unsigned int s=1; s < blockDim.x; s *= 2) {
// // version 1
// if (tid % (2*s) == 0) {
// sdata[tid] += sdata[tid + s];
// }
// //version 2
// int index = 2 * s * tid;
// if (index < blockDim.x) {
// sdata[index] += sdata[index + s];
// }
// __syncthreads(); // let one phase complete before the next starts
// }
// //version 3
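 // Version 3 uses sequential addressing: the active lower half of the threads adds the
 // upper half each pass, so accesses stay contiguous and warps stay non-divergent until
 // fewer than 32 threads remain.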
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
#define gpuErrChk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int main(void){
hipEvent_t start_event, stop_event;
float cuda_elapsed_time;
// define a vector of certain "SIZE" and set values to "1"
// This means that the final sum will be equal to size.
// Easy for debugging!
// Dayum, am I smart or am I smart ?!
unsigned int *h_i, *h_o;
long size = SIZE;
h_i = (unsigned int *) malloc (sizeof(unsigned int)*SIZE);
for (unsigned int i = 0; i < SIZE; i ++)
h_i[i] = 1;
// declare a device array and copy the host array to the device array
// If the size of an array is smallish, one can use the Thrust lib
// Thrust is kickass!
unsigned int *d_i;
unsigned int threadsPerBlock = 128;
unsigned int totalBlocks = (SIZE+(threadsPerBlock-1))/threadsPerBlock;
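 // Round up so every input element is covered even if SIZE is not a multiple of threadsPerBlock.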
// allocate space for output array on the host
h_o = (unsigned int*) malloc(totalBlocks * sizeof(unsigned int));
hipEventCreate(&start_event);
hipEventCreate(&stop_event);
/* start the timer for GPU code */
hipEventRecord(start_event, 0);
gpuErrChk(hipMalloc((void**)&d_i, sizeof(unsigned int)*SIZE));
gpuErrChk(hipMemcpy(d_i, h_i, sizeof(unsigned int)*SIZE, hipMemcpyHostToDevice));
// define an output array on device which will hold the sum from
// each block
unsigned int *d_o;
gpuErrChk(hipMalloc((void**)&d_o, sizeof(unsigned int)*totalBlocks));
// Invoke the kernel: by the power of the greyskull!
hipLaunchKernelGGL(( reduce0), dim3(totalBlocks), dim3(threadsPerBlock), 2*threadsPerBlock*sizeof(unsigned int), 0, d_i, d_o, size);
// Copy the output array back and reduce on CPU
gpuErrChk(hipMemcpy(h_o, d_o, totalBlocks * sizeof(unsigned int), hipMemcpyDeviceToHost));
/*end the timer for GPU code */
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&cuda_elapsed_time, start_event, stop_event);
for (unsigned int j = 1; j < totalBlocks; j++)
{
h_o[0] += h_o[j];
}
printf("Reduced Sum from GPU = %ld \n", h_o[0]);
printf("Time taken by the kernel: %f ms \n",cuda_elapsed_time);
return 0;
}
| dcdbbc4a2807638d5f8c48f53fb10afa86ad290d.cu | #include <stdio.h>
#define SIZE 1024*1024*4
__global__ void reduce0(unsigned int *g_idata, unsigned int *g_odata, long size){
// dynamically allocated shared memory
extern __shared__ unsigned int sdata[];
// set up thread ids: within a block, and across the grid
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
//initialize the shared sum array
sdata[tid] = 0;
sdata[tid] = g_idata[i];
// synchronize: so that all threads within a block have loaded
// the elements from GDRAM to the shared mem
__syncthreads();
//for(unsigned int s=1; s < blockDim.x; s *= 2) {
// // version 1
// if (tid % (2*s) == 0) {
// sdata[tid] += sdata[tid + s];
// }
// //version 2
// int index = 2 * s * tid;
// if (index < blockDim.x) {
// sdata[index] += sdata[index + s];
// }
// __syncthreads(); // let one phase complete before the next starts
// }
// //version 3
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
#define gpuErrChk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int main(void){
cudaEvent_t start_event, stop_event;
float cuda_elapsed_time;
// define a vector of certain "SIZE" and set values to "1"
// This means that the final sum will be equal to size.
// Easy for debugging!
// Dayum, am I smart or am I smart ?!
unsigned int *h_i, *h_o;
long size = SIZE;
h_i = (unsigned int *) malloc (sizeof(unsigned int)*SIZE);
for (unsigned int i = 0; i < SIZE; i ++)
h_i[i] = 1;
// declare a device array and copy the host array to the device array
// If the size of an array is smallish, one can use the Thrust lib
// Thrust is kickass!
unsigned int *d_i;
unsigned int threadsPerBlock = 128;
unsigned int totalBlocks = (SIZE+(threadsPerBlock-1))/threadsPerBlock;
// allocate space for output array on the host
h_o = (unsigned int*) malloc(totalBlocks * sizeof(unsigned int));
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
/* start the timer for GPU code */
cudaEventRecord(start_event, 0);
gpuErrChk(cudaMalloc((void**)&d_i, sizeof(unsigned int)*SIZE));
gpuErrChk(cudaMemcpy(d_i, h_i, sizeof(unsigned int)*SIZE, cudaMemcpyHostToDevice));
// define an output array on device which will hold the sum from
// each block
unsigned int *d_o;
gpuErrChk(cudaMalloc((void**)&d_o, sizeof(unsigned int)*totalBlocks));
// Invoke the kernel: by the power of the greyskull!
reduce0<<<totalBlocks, threadsPerBlock, 2*threadsPerBlock*sizeof(unsigned int)>>>(d_i, d_o, size);
// Copy the output array back and reduce on CPU
gpuErrChk(cudaMemcpy(h_o, d_o, totalBlocks * sizeof(unsigned int), cudaMemcpyDeviceToHost));
/*end the timer for GPU code */
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&cuda_elapsed_time, start_event, stop_event);
for (unsigned int j = 1; j < totalBlocks; j++)
{
h_o[0] += h_o[j];
}
printf("Reduced Sum from GPU = %ld \n", h_o[0]);
printf("Time taken by the kernel: %f ms \n",cuda_elapsed_time);
return 0;
}
|
271209acd259ce759ec10864380cfe73d434a557.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n) {
// Get our global thread ID
int id = blockIdx.x * blockDim.x + threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] + b[id];
}
int main(int argc, char *argv[]) {
// Size of vectors
int n = 25;
// Host input vectors
double *h_a;
double *h_b;
// Host output vector
double *h_c;
// Device input vectors
double *d_a;
double *d_b;
// Device output vector
double *d_c;
// Size, in bytes, of each vector
size_t bytes = n * sizeof(double);
// Allocate memory for each vector on host
h_a = (double *)malloc(bytes);
h_b = (double *)malloc(bytes);
h_c = (double *)malloc(bytes);
// Allocate memory for each vector on GPU
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
int i;
// Initialize vectors on host
for (i = 0; i < n; i++) {
h_a[i] = i;
h_b[i] = i;
}
// Copy host vectors to device
hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n / blockSize);
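  // For n = 25 this rounds up to a single 1024-thread block; the bounds check in the
  // kernel keeps the surplus threads from writing out of range.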
// Execute the kernel
hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n);
// Copy array back to host
hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost);
  // Print each element-wise sum; the original mean check (sum / n == 1) is left
  // commented out below
double sum = 0;
for (i = 0; i < n; i++)
printf(" %f + %f =%f\n", h_a[i], h_b[i], h_c[i]);
// printf("final result: %f\n", sum/(double)n);
// Release device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
}
| 271209acd259ce759ec10864380cfe73d434a557.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n) {
// Get our global thread ID
int id = blockIdx.x * blockDim.x + threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] + b[id];
}
int main(int argc, char *argv[]) {
// Size of vectors
int n = 25;
// Host input vectors
double *h_a;
double *h_b;
// Host output vector
double *h_c;
// Device input vectors
double *d_a;
double *d_b;
// Device output vector
double *d_c;
// Size, in bytes, of each vector
size_t bytes = n * sizeof(double);
// Allocate memory for each vector on host
h_a = (double *)malloc(bytes);
h_b = (double *)malloc(bytes);
h_c = (double *)malloc(bytes);
// Allocate memory for each vector on GPU
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
int i;
// Initialize vectors on host
for (i = 0; i < n; i++) {
h_a[i] = i;
h_b[i] = i;
}
// Copy host vectors to device
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n / blockSize);
// Execute the kernel
vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
// Copy array back to host
cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
  // Print each element-wise sum; the original mean check (sum / n == 1) is left
  // commented out below
double sum = 0;
for (i = 0; i < n; i++)
printf(" %f + %f =%f\n", h_a[i], h_b[i], h_c[i]);
// printf("final result: %f\n", sum/(double)n);
// Release device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
653fdfa18965ffba3172b5bebe9d5c564ed7a51b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
to compile:
nvcc --gpu-architecture=compute_70 gpu_report_time.cu
requirements for the in-device reduction:
Hardware: kepler or newer architecture
nvcc: cuda 9 or newer
*/
// Comment out if you execute the reduction on the host
#define ACCUMULATE_IN_DEVICE
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
const int N = (256 * 256);
const int BSIZE = 32;
__global__ void gpu_kernel(float *d_A, float *d_B, float *d_C, int len)
{
// write your code here
int i = blockDim.x * blockIdx.x + threadIdx.x;
float mysum = 0.0;
if (i < len) {
d_C[i] = 0.0;
for (int j = 0; j < len; j++) {
mysum += (d_A[i] - d_B[j]) * (d_A[i] - d_B[j]);
}
}
__syncthreads();
#ifdef ACCUMULATE_IN_DEVICE
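  // Warp-level tree reduction: __shfl_down_sync halves the offset each step, so after
  // log2(32) steps lane 0 holds the sum of all 32 lanes' partial results.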
for (int offset = 32/2; offset > 0; offset >>= 1){
mysum += __shfl_down_sync(0xffffffff, mysum, offset, 32);
}
if (threadIdx.x == 0) d_C[blockIdx.x] = mysum;
#else
d_C[i] = mysum;
#endif
}
__host__ void cpu_kernel(float *d_A, float *d_B, float *d_C, int len)
{
for (int i = 0; i < len; i++) {
d_C[i] = 0.0;
for (int j = 0; j < len; j++) {
d_C[i] += (d_A[i] - d_B[j]) * (d_A[i] - d_B[j]);
}
}
}
int main(int argc, char **argv)
{
float *h_A, *h_B, *h_C; // for host memory
float *d_A, *d_B, *d_C; // for device memory
  float result; // result
dim3 grid(N/BSIZE, 1, 1), block(BSIZE, 1, 1); // grid and block size
  hipEvent_t start, stop; // for measuring time on GPU
  struct timeval start_time, end_time; // for measuring time on CPU
float elapsed_time;
hipEventCreate(&start);
hipEventCreate(&stop);
/* host memory allocation */
h_A = (float *)malloc(sizeof(float) * N);
h_B = (float *)malloc(sizeof(float) * N);
h_C = (float *)malloc(sizeof(float) * N);
for (int i = 0; i < N; ++i) {
h_A[i] = 1.0f; h_B[i] = 2.0f; h_C[i] = 0.0f;
}
/* device memory allocation */
hipMalloc((void **)&d_A, sizeof(float) * N);
hipMalloc((void **)&d_B, sizeof(float) * N);
hipMalloc((void **)&d_C, sizeof(float) * N);
  /* copy data from the host to the device */
hipMemcpy(d_A, h_A, sizeof(float) * N, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, sizeof(float) * N, hipMemcpyHostToDevice);
  /* The host calls the kernel */
hipEventRecord(start, 0);
hipLaunchKernelGGL(( gpu_kernel), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, N);
hipMemcpy(h_C, d_C, sizeof(float) * N, hipMemcpyDeviceToHost);
result = 0.0;
#ifdef ACCUMULATE_IN_DEVICE
for (int i = 0; i < N/BSIZE; ++i) {
result += h_C[i];
}
#else
for (int i = 0; i < N; ++i) {
result += h_C[i];
}
#endif
result /= (float) N;
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time, start, stop);
/* Result write back */
/* Release device memory */
hipFree(d_A); hipFree(d_B); hipFree(d_C);
/* check the result for GPU */
printf("GPU: result = %f, time = %f [msec]\n", result, elapsed_time);
/* check the result for CPU */
gettimeofday(&start_time, NULL);
cpu_kernel(h_A, h_B, h_C, N);
result = 0.0;
for (int i = 0; i < N; ++i)
result += h_C[i];
result /= (float) N;
gettimeofday(&end_time, NULL);
elapsed_time = (end_time.tv_sec - start_time.tv_sec) * 1000.0 +
(end_time.tv_usec - start_time.tv_usec) / 1000.0;
printf("CPU: result = %f, time = %f [msec]\n", result, elapsed_time);
/* Release host memory */
free(h_A); free(h_B); free(h_C);
return 0;
}
| 653fdfa18965ffba3172b5bebe9d5c564ed7a51b.cu | /*
to compile:
nvcc --gpu-architecture=compute_70 gpu_report_time.cu
requirements for the in-device reduction:
Hardware: kepler or newer architecture
nvcc: cuda 9 or newer
*/
// Comment out if you execute the reduction on the host
#define ACCUMULATE_IN_DEVICE
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
const int N = (256 * 256);
const int BSIZE = 32;
__global__ void gpu_kernel(float *d_A, float *d_B, float *d_C, int len)
{
// write your code here
int i = blockDim.x * blockIdx.x + threadIdx.x;
float mysum = 0.0;
if (i < len) {
d_C[i] = 0.0;
for (int j = 0; j < len; j++) {
mysum += (d_A[i] - d_B[j]) * (d_A[i] - d_B[j]);
}
}
__syncthreads();
#ifdef ACCUMULATE_IN_DEVICE
for (int offset = 32/2; offset > 0; offset >>= 1){
mysum += __shfl_down_sync(0xffffffff, mysum, offset, 32);
}
if (threadIdx.x == 0) d_C[blockIdx.x] = mysum;
#else
d_C[i] = mysum;
#endif
}
__host__ void cpu_kernel(float *d_A, float *d_B, float *d_C, int len)
{
for (int i = 0; i < len; i++) {
d_C[i] = 0.0;
for (int j = 0; j < len; j++) {
d_C[i] += (d_A[i] - d_B[j]) * (d_A[i] - d_B[j]);
}
}
}
int main(int argc, char **argv)
{
float *h_A, *h_B, *h_C; // for host memory
float *d_A, *d_B, *d_C; // for device memory
  float result; // result
dim3 grid(N/BSIZE, 1, 1), block(BSIZE, 1, 1); // grid and block size
  cudaEvent_t start, stop; // for measuring time on GPU
  struct timeval start_time, end_time; // for measuring time on CPU
float elapsed_time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
/* host memory allocation */
h_A = (float *)malloc(sizeof(float) * N);
h_B = (float *)malloc(sizeof(float) * N);
h_C = (float *)malloc(sizeof(float) * N);
for (int i = 0; i < N; ++i) {
h_A[i] = 1.0f; h_B[i] = 2.0f; h_C[i] = 0.0f;
}
/* device memory allocation */
cudaMalloc((void **)&d_A, sizeof(float) * N);
cudaMalloc((void **)&d_B, sizeof(float) * N);
cudaMalloc((void **)&d_C, sizeof(float) * N);
  /* copy data from the host to the device */
cudaMemcpy(d_A, h_A, sizeof(float) * N, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, sizeof(float) * N, cudaMemcpyHostToDevice);
  /* The host calls the kernel */
cudaEventRecord(start, 0);
gpu_kernel<<<grid, block>>>(d_A, d_B, d_C, N);
cudaMemcpy(h_C, d_C, sizeof(float) * N, cudaMemcpyDeviceToHost);
result = 0.0;
#ifdef ACCUMULATE_IN_DEVICE
for (int i = 0; i < N/BSIZE; ++i) {
result += h_C[i];
}
#else
for (int i = 0; i < N; ++i) {
result += h_C[i];
}
#endif
result /= (float) N;
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time, start, stop);
/* Result write back */
/* Release device memory */
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
/* check the result for GPU */
printf("GPU: result = %f, time = %f [msec]\n", result, elapsed_time);
/* check the result for CPU */
gettimeofday(&start_time, NULL);
cpu_kernel(h_A, h_B, h_C, N);
result = 0.0;
for (int i = 0; i < N; ++i)
result += h_C[i];
result /= (float) N;
gettimeofday(&end_time, NULL);
elapsed_time = (end_time.tv_sec - start_time.tv_sec) * 1000.0 +
(end_time.tv_usec - start_time.tv_usec) / 1000.0;
printf("CPU: result = %f, time = %f [msec]\n", result, elapsed_time);
/* Release host memory */
free(h_A); free(h_B); free(h_C);
return 0;
}
|
b4dc197658c806c8045b2fc1d35b8c133af2aaa2.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include "hip/hip_runtime.h"
__global__ void cubic(const float * d_in, float * d_out)
{
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f * f;
}
int main()
{
const int ARRAY_SIZE=10;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
float h_in[ARRAY_SIZE];
for(int i=0;i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
//Declare GPU memory.
float * d_in;
float * d_out;
hipMalloc((void **)&d_in, ARRAY_BYTES);
hipMalloc((void **)&d_out, ARRAY_BYTES);
  //Transfer array to GPU.
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cubic), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_in, d_out);
  //Copy the results back to the host.
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
//Output the results.
for(int i=0; i < ARRAY_SIZE; i++) {
printf("%f\n", h_out[i]);
}
hipFree(d_in);
hipFree(d_out);
return 0;
}
| b4dc197658c806c8045b2fc1d35b8c133af2aaa2.cu | #include <iostream>
#include <stdio.h>
#include "cuda_runtime.h"
__global__ void cubic(const float * d_in, float * d_out)
{
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f * f;
}
int main()
{
const int ARRAY_SIZE=10;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
float h_in[ARRAY_SIZE];
for(int i=0;i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
//Declare GPU memory.
float * d_in;
float * d_out;
cudaMalloc((void **)&d_in, ARRAY_BYTES);
cudaMalloc((void **)&d_out, ARRAY_BYTES);
  //Transfer array to GPU.
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
cubic<<<1, ARRAY_SIZE>>>(d_in, d_out);
  //Copy the results back to the host.
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
//Output the results.
for(int i=0; i < ARRAY_SIZE; i++) {
printf("%f\n", h_out[i]);
}
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
6f0c1aca21926a82c0256105e7e2a7a11edfabda.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lm.h"
namespace kso {
namespace img {
namespace dspk {
__device__ float sinc(float x){
float y = 3.14 * x;
float A = sin(y);
float B = y;
float C;
if(x != 0.0){
C = A / B;
} else {
C = 1.0;
}
// printf("%f\n",C);
return C;
}
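// Gaussian-style weight centred on the kernel midpoint; sigma is set to 10 * (ksz / 2),
// so the taper across the window is very gentle.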
__device__ float local_kern_1D(uint X, uint ksz){
// // calculate offset for kernel
uint ks2 = ksz / 2;
float sig = 10 * ks2;
float var = sig * sig;
float x = (float) X - (float) ks2;
float x2 = x * x;
// return exp(-x2 / var) / (1.0 + x2);
return exp(-x2 / var);
}
__global__ void calc_gdt_0(float * gdt_0, float * gdt_2, float * gm, dim3 sz, uint k_sz){
// calculate offset for kernel
uint ks2 = k_sz / 2;
// retrieve sizes
uint sz_l = sz.x;
uint sz_y = sz.y;
// compute stride sizes
uint n_l = 1;
uint n_y = n_l * sz_l;
uint n_t = n_y * sz_y;
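	// Strides for an l-fastest (spectral axis) layout: index = t * (sz_y * sz_l) + y * sz_l + l.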
// retrieve coordinates from thread and block id.
uint l = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
uint t = blockIdx.z * blockDim.z + threadIdx.z;
// initialize neighborhood mean
float sum = 0.0;
// convolve over spectrum
for(uint c = 0; c < k_sz; c++){
// calculate offset
uint C = l - ks2 + c;
// truncate kernel if we're over the edge
if(C > (sz_l - 1)){
continue;
}
// calculate kernel at this point
float k_i = local_kern_1D(c, k_sz);
// load from memory
float gm_i = gm[n_t * t + n_y * y + n_l * C];
float dt_i = gdt_2[n_t * t + n_y * y + n_l * C];
// update value of mean
sum = sum + (gm_i * dt_i * k_i);
// sum = sum + (dt_i * k_i);
}
gdt_0[n_t * t + n_y * y + n_l * l] = sum;
}
__global__ void calc_gdt_1(float * gdt_1, float * gdt_0, dim3 sz, uint k_sz){
// calculate offset for kernel
uint ks2 = k_sz / 2;
// retrieve sizes
uint sz_l = sz.x;
uint sz_y = sz.y;
// compute stride sizes
uint n_l = 1;
uint n_y = n_l * sz_l;
uint n_t = n_y * sz_y;
// retrieve coordinates from thread and block id.
uint l = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
uint t = blockIdx.z * blockDim.z + threadIdx.z;
// initialize neighborhood mean
float sum = 0.0;
// convolve over space
for(uint b = 0; b < k_sz; b++){
// calculate offset
uint B = y - ks2 + b;
// truncate kernel if we're over the edge
if(B > (sz_y - 1)) {
continue;
}
// calculate kernel at this point
float k_i = local_kern_1D(b, k_sz);
// load from memory
float dt_i = gdt_0[n_t * t + n_y * B + n_l * l];
// update value of mean
sum = sum + (dt_i * k_i);
}
gdt_1[n_t * t + n_y * y + n_l * l] = sum;
}
__global__ void calc_gdt_2(float * gdt_2, float * gdt_1, float * dt, float * gm, float * norm, dim3 sz, uint k_sz){
// calculate offset for kernel
uint ks2 = k_sz / 2;
// retrieve sizes
uint sz_l = sz.x;
uint sz_y = sz.y;
uint sz_t = sz.z;
// compute stride sizes
uint n_l = 1;
uint n_y = n_l * sz_l;
uint n_t = n_y * sz_y;
// retrieve coordinates from thread and block id.
uint l = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
uint t = blockIdx.z * blockDim.z + threadIdx.z;
// initialize neighborhood mean
float sum = 0.0;
// convolve over time
for(uint a = 0; a < k_sz; a++){
// calculate offsets
uint A = t - ks2 + a;
// truncate the kernel if we're over the edge
if(A > (sz_t - 1)){
continue;
}
// calculate kernel at this point
float k_i = local_kern_1D(a, k_sz);
// load from memory
float dt_i = gdt_1[n_t * A + n_y * y + n_l * l];
// update value of mean
sum = sum + (dt_i * k_i);
}
float dt_i = dt[n_t * t + n_y * y + n_l * l];
float gm_i = gm[n_t * t + n_y * y + n_l * l];
float norm_i = norm[n_t * t + n_y * y + n_l * l];
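	// Blend: good pixels keep their original value, flagged pixels get the normalized local mean.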
float bm_i = 1.0 - gm_i; // bad pixels are now equal to one.
float lm_i = sum / norm_i; // local mean that we've actually been calculating the whole time
gdt_2[n_t * t + n_y * y + n_l * l] = (dt_i * gm_i) + (bm_i * lm_i);
// dt_2[n_t * t + n_y * y + n_l * l] = (dt_i * gm_i) ;
}
__global__ void calc_lmn_0(float * norm_0, float * gm, uint * bad_pix, dim3 sz, uint k_sz){
// calculate offset for kernel
uint ks2 = k_sz / 2;
// retrieve sizes
uint sz_l = sz.x;
uint sz_y = sz.y;
// compute stride sizes
uint n_l = 1;
uint n_y = n_l * sz_l;
uint n_t = n_y * sz_y;
// retrieve coordinates from thread and block id.
uint l = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
uint t = blockIdx.z * blockDim.z + threadIdx.z;
// initialize neighborhood mean
float norm = 0.0;
// convolve over spectrum
for(uint c = 0; c < k_sz; c++){
// calculate offset
uint C = l - ks2 + c;
// truncate kernel if we're over the edge
if(C > (sz_l - 1)){
continue;
}
// calculate kernel at this point
float k_i = local_kern_1D(c, k_sz);
// load from memory
float gm_i = gm[n_t * t + n_y * y + n_l * C];
// update value of mean
// norm = norm + (gm_i * k_i);
norm = norm + (k_i);
}
norm_0[n_t * t + n_y * y + n_l * l] = norm;
}
__global__ void calc_lmn_1(float * norm_1, float * norm_0, dim3 sz, uint k_sz){
// calculate offset for kernel
uint ks2 = k_sz / 2;
// retrieve sizes
uint sz_l = sz.x;
uint sz_y = sz.y;
// compute stride sizes
uint n_l = 1;
uint n_y = n_l * sz_l;
uint n_t = n_y * sz_y;
// retrieve coordinates from thread and block id.
uint l = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
uint t = blockIdx.z * blockDim.z + threadIdx.z;
// initialize neighborhood mean
float norm = 0.0;
// convolve over space
for(uint b = 0; b < k_sz; b++){
// calculate offset
uint B = y - ks2 + b;
// truncate kernel if we're over the edge
if(B > (sz_y - 1)) {
continue;
}
// calculate kernel at this point
float k_i = local_kern_1D(b, k_sz);
// load from memory
float norm_i = norm_0[n_t * t + n_y * B + n_l * l];
// update value of mean
norm = norm + (norm_i * k_i);
}
norm_1[n_t * t + n_y * y + n_l * l] = norm;
}
__global__ void calc_lmn_2(float * norm_2, float * norm_1, dim3 sz, uint k_sz){
// calculate offset for kernel
uint ks2 = k_sz / 2;
// retrieve sizes
uint sz_l = sz.x;
uint sz_y = sz.y;
uint sz_t = sz.z;
// compute stride sizes
uint n_l = 1;
uint n_y = n_l * sz_l;
uint n_t = n_y * sz_y;
// retrieve coordinates from thread and block id.
uint l = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
uint t = blockIdx.z * blockDim.z + threadIdx.z;
// initialize neighborhood mean
float norm = 0.0;
// convolve over time
for(uint a = 0; a < k_sz; a++){
// calculate offsets
uint A = t - ks2 + a;
// truncate the kernel if we're over the edge
if(A > (sz_t - 1)){
continue;
}
// calculate kernel at this point
float k_i = local_kern_1D(a, k_sz);
// load from memory
float norm_i = norm_1[n_t * A + n_y * y + n_l * l];
// update value of mean
norm = norm + (norm_i * k_i);
}
norm_2[n_t * t + n_y * y + n_l * l] = norm;
}
__global__ void calc_gdt(float * gdt, float * dt, float * gm, dim3 sz){
// retrieve sizes
uint sz_l = sz.x;
uint sz_y = sz.y;
// compute stride sizes
uint n_l = 1;
uint n_y = n_l * sz_l;
uint n_t = n_y * sz_y;
// retrieve coordinates from thread and block id.
uint l = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
uint t = blockIdx.z * blockDim.z + threadIdx.z;
float dt_i = dt[n_t * t + n_y * y + n_l * l];
float gm_i = gm[n_t * t + n_y * y + n_l * l];
gdt[n_t * t + n_y * y + n_l * l] = dt_i * gm_i;
}
}
}
}
| 6f0c1aca21926a82c0256105e7e2a7a11edfabda.cu | #include "lm.h"
namespace kso {
namespace img {
namespace dspk {
__device__ float sinc(float x){
float y = 3.14 * x;
float A = sin(y);
float B = y;
float C;
if(x != 0.0){
C = A / B;
} else {
C = 1.0;
}
// printf("%f\n",C);
return C;
}
__device__ float local_kern_1D(uint X, uint ksz){
// // calculate offset for kernel
uint ks2 = ksz / 2;
float sig = 10 * ks2;
float var = sig * sig;
float x = (float) X - (float) ks2;
float x2 = x * x;
// return exp(-x2 / var) / (1.0 + x2);
return exp(-x2 / var);
}
__global__ void calc_gdt_0(float * gdt_0, float * gdt_2, float * gm, dim3 sz, uint k_sz){
// calculate offset for kernel
uint ks2 = k_sz / 2;
// retrieve sizes
uint sz_l = sz.x;
uint sz_y = sz.y;
// compute stride sizes
uint n_l = 1;
uint n_y = n_l * sz_l;
uint n_t = n_y * sz_y;
// retrieve coordinates from thread and block id.
uint l = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
uint t = blockIdx.z * blockDim.z + threadIdx.z;
// initialize neighborhood mean
float sum = 0.0;
// convolve over spectrum
for(uint c = 0; c < k_sz; c++){
// calculate offset
uint C = l - ks2 + c;
// truncate kernel if we're over the edge
if(C > (sz_l - 1)){
continue;
}
// calculate kernel at this point
float k_i = local_kern_1D(c, k_sz);
// load from memory
float gm_i = gm[n_t * t + n_y * y + n_l * C];
float dt_i = gdt_2[n_t * t + n_y * y + n_l * C];
// update value of mean
sum = sum + (gm_i * dt_i * k_i);
// sum = sum + (dt_i * k_i);
}
gdt_0[n_t * t + n_y * y + n_l * l] = sum;
}
__global__ void calc_gdt_1(float * gdt_1, float * gdt_0, dim3 sz, uint k_sz){
// calculate offset for kernel
uint ks2 = k_sz / 2;
// retrieve sizes
uint sz_l = sz.x;
uint sz_y = sz.y;
// compute stride sizes
uint n_l = 1;
uint n_y = n_l * sz_l;
uint n_t = n_y * sz_y;
// retrieve coordinates from thread and block id.
uint l = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
uint t = blockIdx.z * blockDim.z + threadIdx.z;
// initialize neighborhood mean
float sum = 0.0;
// convolve over space
for(uint b = 0; b < k_sz; b++){
// calculate offset
uint B = y - ks2 + b;
// truncate kernel if we're over the edge
if(B > (sz_y - 1)) {
continue;
}
// calculate kernel at this point
float k_i = local_kern_1D(b, k_sz);
// load from memory
float dt_i = gdt_0[n_t * t + n_y * B + n_l * l];
// update value of mean
sum = sum + (dt_i * k_i);
}
gdt_1[n_t * t + n_y * y + n_l * l] = sum;
}
__global__ void calc_gdt_2(float * gdt_2, float * gdt_1, float * dt, float * gm, float * norm, dim3 sz, uint k_sz){
// calculate offset for kernel
uint ks2 = k_sz / 2;
// retrieve sizes
uint sz_l = sz.x;
uint sz_y = sz.y;
uint sz_t = sz.z;
// compute stride sizes
uint n_l = 1;
uint n_y = n_l * sz_l;
uint n_t = n_y * sz_y;
// retrieve coordinates from thread and block id.
uint l = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
uint t = blockIdx.z * blockDim.z + threadIdx.z;
// initialize neighborhood mean
float sum = 0.0;
// convolve over time
for(uint a = 0; a < k_sz; a++){
// calculate offsets
uint A = t - ks2 + a;
// truncate the kernel if we're over the edge
if(A > (sz_t - 1)){
continue;
}
// calculate kernel at this point
float k_i = local_kern_1D(a, k_sz);
// load from memory
float dt_i = gdt_1[n_t * A + n_y * y + n_l * l];
// update value of mean
sum = sum + (dt_i * k_i);
}
float dt_i = dt[n_t * t + n_y * y + n_l * l];
float gm_i = gm[n_t * t + n_y * y + n_l * l];
float norm_i = norm[n_t * t + n_y * y + n_l * l];
float bm_i = 1.0 - gm_i; // bad pixels are now equal to one.
float lm_i = sum / norm_i; // local mean that we've actually been calculating the whole time
gdt_2[n_t * t + n_y * y + n_l * l] = (dt_i * gm_i) + (bm_i * lm_i);
// dt_2[n_t * t + n_y * y + n_l * l] = (dt_i * gm_i) ;
}
__global__ void calc_lmn_0(float * norm_0, float * gm, uint * bad_pix, dim3 sz, uint k_sz){
// calculate offset for kernel
uint ks2 = k_sz / 2;
// retrieve sizes
uint sz_l = sz.x;
uint sz_y = sz.y;
// compute stride sizes
uint n_l = 1;
uint n_y = n_l * sz_l;
uint n_t = n_y * sz_y;
// retrieve coordinates from thread and block id.
uint l = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
uint t = blockIdx.z * blockDim.z + threadIdx.z;
// initialize neighborhood mean
float norm = 0.0;
// convolve over spectrum
for(uint c = 0; c < k_sz; c++){
// calculate offset
uint C = l - ks2 + c;
// truncate kernel if we're over the edge
if(C > (sz_l - 1)){
continue;
}
// calculate kernel at this point
float k_i = local_kern_1D(c, k_sz);
// load from memory
float gm_i = gm[n_t * t + n_y * y + n_l * C];
// update value of mean
// norm = norm + (gm_i * k_i);
norm = norm + (k_i);
}
norm_0[n_t * t + n_y * y + n_l * l] = norm;
}
__global__ void calc_lmn_1(float * norm_1, float * norm_0, dim3 sz, uint k_sz){
// calculate offset for kernel
uint ks2 = k_sz / 2;
// retrieve sizes
uint sz_l = sz.x;
uint sz_y = sz.y;
// compute stride sizes
uint n_l = 1;
uint n_y = n_l * sz_l;
uint n_t = n_y * sz_y;
// retrieve coordinates from thread and block id.
uint l = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
uint t = blockIdx.z * blockDim.z + threadIdx.z;
// initialize neighborhood mean
float norm = 0.0;
// convolve over space
for(uint b = 0; b < k_sz; b++){
// calculate offset
uint B = y - ks2 + b;
// truncate kernel if we're over the edge
if(B > (sz_y - 1)) {
continue;
}
// calculate kernel at this point
float k_i = local_kern_1D(b, k_sz);
// load from memory
float norm_i = norm_0[n_t * t + n_y * B + n_l * l];
// update value of mean
norm = norm + (norm_i * k_i);
}
norm_1[n_t * t + n_y * y + n_l * l] = norm;
}
__global__ void calc_lmn_2(float * norm_2, float * norm_1, dim3 sz, uint k_sz){
// calculate offset for kernel
uint ks2 = k_sz / 2;
// retrieve sizes
uint sz_l = sz.x;
uint sz_y = sz.y;
uint sz_t = sz.z;
// compute stride sizes
uint n_l = 1;
uint n_y = n_l * sz_l;
uint n_t = n_y * sz_y;
// retrieve coordinates from thread and block id.
uint l = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
uint t = blockIdx.z * blockDim.z + threadIdx.z;
// initialize neighborhood mean
float norm = 0.0;
// convolve over time
for(uint a = 0; a < k_sz; a++){
// calculate offsets
uint A = t - ks2 + a;
// truncate the kernel if we're over the edge
if(A > (sz_t - 1)){
continue;
}
// calculate kernel at this point
float k_i = local_kern_1D(a, k_sz);
// load from memory
float norm_i = norm_1[n_t * A + n_y * y + n_l * l];
// update value of mean
norm = norm + (norm_i * k_i);
}
norm_2[n_t * t + n_y * y + n_l * l] = norm;
}
__global__ void calc_gdt(float * gdt, float * dt, float * gm, dim3 sz){
// retrieve sizes
uint sz_l = sz.x;
uint sz_y = sz.y;
// compute stride sizes
uint n_l = 1;
uint n_y = n_l * sz_l;
uint n_t = n_y * sz_y;
// retrieve coordinates from thread and block id.
uint l = blockIdx.x * blockDim.x + threadIdx.x;
uint y = blockIdx.y * blockDim.y + threadIdx.y;
uint t = blockIdx.z * blockDim.z + threadIdx.z;
float dt_i = dt[n_t * t + n_y * y + n_l * l];
float gm_i = gm[n_t * t + n_y * y + n_l * l];
gdt[n_t * t + n_y * y + n_l * l] = dt_i * gm_i;
}
}
}
}
|
dd3fe83fbeea09dd02211fca8939ece388ac1496.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/layers/l1_loss_layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ComputeSign(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? Dtype(1) : Dtype(-1);
}
}
// TODO maybe change the way of detecting NaNs
template <typename Dtype>
__global__ void FindNotNaNs(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
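    // NaN is the only value for which x == x is false, so finite entries map to 1 and NaNs to 0.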
out[index] = in[index]==in[index] ? Dtype(1) : Dtype(0);
}
}
template <typename Dtype>
__global__ void KillNaNs(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index]==in[index] ? in[index] : Dtype(0);
}
}
template <typename Dtype>
__global__ void KillMasked(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > Dtype(0.5) ? out[index] : Dtype(0);
// out[index] = out[index]==out[index] ? out[index] : Dtype(0);
// out[index] = out[index]>1e3 ? 0 : out[index];
// out[index] = out[index]<-1e3 ? 0 : out[index];
}
}
template <typename Dtype>
__global__ void CombineMasked(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
if(in[index] == 0 | out[index] == 0 | out[index] != out[index])
out[index] = 0;
else
out[index] = 1;
//out[index] = (in[index] + out[index]) > Dtype(0.5) ? Dtype(1) : Dtype(0);
}
}
template <typename Dtype>
__global__ void KillMaskedAcrossChannels(const int n, const int width_height, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
const int mask_idx = index % width_height;
out[index] = in[mask_idx] > Dtype(0.5) ? out[index] : Dtype(0);
}
}
template <typename Dtype>
__global__ void MaskPlateauValues(const int n, const Dtype* in, Dtype* out, Dtype plateau) {
CUDA_KERNEL_LOOP(index, n) {
if(fabs(in[index]) < plateau) out[index] = Dtype(0); // Mask out plateau values and keep other as is
}
}
template <typename Dtype>
__global__ void MaskPlateauValuesInitial(const int n, const Dtype* in, Dtype* out, Dtype plateau) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = (fabs(in[index]) < plateau) ? Dtype(0) : Dtype(1);
}
}
template <typename Dtype>
__global__ void MaskMaxErrors(const int n, const Dtype* in, Dtype* out, Dtype max_error) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = (fabs(in[index]) > max_error) ? Dtype(0) : out[index];
}
}
template <typename Dtype>
void L1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top)
{
Blob<Dtype> *diffptr = diff_top_vec_[0];
Dtype dot, loss;
if(bottom.size() > 1) {
diff_layer_->Forward(diff_bottom_vec_, diff_top_vec_);
}
// if necessary, compute the number of not-NaNs
int count = bottom[0]->count();
int num = bottom[0]->num();
hipLaunchKernelGGL(( FindNotNaNs<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, diffptr->gpu_data(), mask_.mutable_gpu_data());
hipDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
// for backward
if (this->layer_param_.l1_loss_param().normalize_by_num_entries()) {
caffe_gpu_dot(count, mask_.gpu_data(), mask_.gpu_data(), &normalize_coeff_backward_);
normalize_coeff_backward_ /= mask_.channels();
} else {
normalize_coeff_backward_ = Dtype(count) / mask_.channels();
}
if(bottom.size() == 3) {
hipLaunchKernelGGL(( CombineMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom[2]->gpu_data(), mask_.mutable_gpu_data());
hipDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
}
// for forward
if (this->layer_param_.l1_loss_param().normalize_by_num_entries()) {
caffe_gpu_dot(count, mask_.gpu_data(), mask_.gpu_data(), &normalize_coeff_forward_);
normalize_coeff_forward_ /= mask_.channels();
} else {
caffe_gpu_dot(count, mask_.gpu_data(), mask_.gpu_data(), &normalize_coeff_forward_);
normalize_coeff_forward_ /= mask_.channels();
}
if(this->layer_param_.l1_loss_param().max_error() > 0) {
    // Mask out locations whose error magnitude exceeds max_error; these locations will not be backpropagated
caffe_copy(bottom[0]->count(), mask_.gpu_data(),
temp_.mutable_gpu_data());
hipLaunchKernelGGL(( MaskMaxErrors<Dtype>), dim3(CAFFE_GET_BLOCKS(temp_.count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
temp_.count(), diffptr->gpu_data(), temp_.mutable_gpu_data(), this->layer_param_.l1_loss_param().max_error());
}
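  // l2_per_location: square the channel-wise differences, sum over channels, then take the
  // square root, so each spatial location contributes the L2 norm of its difference vector.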
if (this->layer_param_.l1_loss_param().l2_per_location()) {
// set masked (NaNs only) to zero
hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, mask_.gpu_data(), diffptr->mutable_gpu_data());
hipDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
square_layer_->Forward(diff_top_vec_, square_top_vec_);
sum_layer_->Forward(square_top_vec_, sum_top_vec_);
// Mask plateau in summed blob (only one channel):
if(this->layer_param_.l1_loss_param().plateau() > 0) {
float plateau_val_squared = this->layer_param_.l1_loss_param().plateau() * this->layer_param_.l1_loss_param().plateau();
hipLaunchKernelGGL(( MaskPlateauValuesInitial<Dtype>), dim3(CAFFE_GET_BLOCKS(sum_output_.count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
sum_output_.count(), sum_output_.gpu_data(), plateau_l2_.mutable_gpu_data(), plateau_val_squared);
hipDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(sum_output_.count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_data());
hipDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
}
sqrt_layer_->Forward(sum_top_vec_, sqrt_top_vec_);
// Note sign_ is set to all ones in Reshape
caffe_gpu_dot(sqrt_output_.count(), sqrt_output_.gpu_data(), sign_.gpu_data(), &dot);
}
else {
// Mask plateau:
if(this->layer_param_.l1_loss_param().plateau() > 0) {
hipLaunchKernelGGL(( MaskPlateauValues<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, diffptr->gpu_data(), mask_.mutable_gpu_data(), this->layer_param_.l1_loss_param().plateau());
CUDA_POST_KERNEL_CHECK;
}
//mask_.print("MASK2");
// set masked (NaNs, plateau) to zero
hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, mask_.gpu_data(), diffptr->mutable_gpu_data());
CUDA_POST_KERNEL_CHECK;
hipLaunchKernelGGL(( ComputeSign<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, diffptr->gpu_data(), sign_.mutable_gpu_data());
CUDA_POST_KERNEL_CHECK;
caffe_gpu_dot(count, diffptr->gpu_data(), sign_.gpu_data(), &dot);
}
if(normalize_coeff_forward_ == 0)
loss = 0;
else
loss = dot / normalize_coeff_forward_;
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
void L1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
{
bool prop_down = propagate_down[0];
if(bottom.size() > 1) prop_down |= propagate_down[1];
Blob<Dtype> *diffptr = diff_top_vec_[0];
if (prop_down) {
// calculate mask ratio
Dtype ratio = normalize_coeff_forward_ / bottom[0]->count();
Dtype alpha = 0;
if(normalize_coeff_backward_ != 0 )
alpha = top[0]->cpu_diff()[0] / normalize_coeff_backward_;
if( bottom.size() == 3 && ratio > this->layer_param_.l1_loss_param().ratio())
alpha = Dtype(0);
if (this->layer_param_.l1_loss_param().l2_per_location()) {
vector<bool> prop_down(1,true);
caffe_gpu_axpby(sqrt_output_.count(), alpha, sign_.gpu_data(),
Dtype(0), sqrt_output_.mutable_gpu_diff());
sqrt_layer_->Backward(sqrt_top_vec_, prop_down, sum_top_vec_);
if(this->layer_param_.l1_loss_param().plateau() > 0) {
hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(sum_output_.count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_diff());
hipDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
}
sum_layer_->Backward(sum_top_vec_, prop_down, square_top_vec_);
square_layer_->Backward(square_top_vec_, prop_down, diff_top_vec_);
}
else {
caffe_gpu_axpby(diffptr->count(), alpha, sign_.gpu_data(),
Dtype(0), diffptr->mutable_gpu_diff());
}
// mask gradients
if(this->layer_param_.l1_loss_param().max_error() == 0) {
hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(diffptr->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
diffptr->count(), mask_.gpu_data(), diffptr->mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
}else{
hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(diffptr->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
diffptr->count(), temp_.gpu_data(), diffptr->mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
}
if(bottom.size() > 1) {
diff_layer_->Backward(diff_top_vec_, propagate_down, diff_bottom_vec_);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(L1LossLayer);
} // namespace caffe
| dd3fe83fbeea09dd02211fca8939ece388ac1496.cu | #include <vector>
#include "caffe/layer.hpp"
#include "caffe/layers/l1_loss_layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ComputeSign(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? Dtype(1) : Dtype(-1);
}
}
// TODO maybe change the way of detecting NaNs
template <typename Dtype>
__global__ void FindNotNaNs(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index]==in[index] ? Dtype(1) : Dtype(0);
}
}
template <typename Dtype>
__global__ void KillNaNs(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index]==in[index] ? in[index] : Dtype(0);
}
}
template <typename Dtype>
__global__ void KillMasked(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > Dtype(0.5) ? out[index] : Dtype(0);
// out[index] = out[index]==out[index] ? out[index] : Dtype(0);
// out[index] = out[index]>1e3 ? 0 : out[index];
// out[index] = out[index]<-1e3 ? 0 : out[index];
}
}
template <typename Dtype>
__global__ void CombineMasked(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
if(in[index] == 0 || out[index] == 0 || out[index] != out[index])
out[index] = 0;
else
out[index] = 1;
//out[index] = (in[index] + out[index]) > Dtype(0.5) ? Dtype(1) : Dtype(0);
}
}
template <typename Dtype>
__global__ void KillMaskedAcrossChannels(const int n, const int width_height, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
const int mask_idx = index % width_height;
out[index] = in[mask_idx] > Dtype(0.5) ? out[index] : Dtype(0);
}
}
template <typename Dtype>
__global__ void MaskPlateauValues(const int n, const Dtype* in, Dtype* out, Dtype plateau) {
CUDA_KERNEL_LOOP(index, n) {
if(fabs(in[index]) < plateau) out[index] = Dtype(0); // Mask out plateau values and keep other as is
}
}
template <typename Dtype>
__global__ void MaskPlateauValuesInitial(const int n, const Dtype* in, Dtype* out, Dtype plateau) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = (fabs(in[index]) < plateau) ? Dtype(0) : Dtype(1);
}
}
template <typename Dtype>
__global__ void MaskMaxErrors(const int n, const Dtype* in, Dtype* out, Dtype max_error) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = (fabs(in[index]) > max_error) ? Dtype(0) : out[index];
}
}
template <typename Dtype>
void L1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top)
{
Blob<Dtype> *diffptr = diff_top_vec_[0];
Dtype dot, loss;
if(bottom.size() > 1) {
diff_layer_->Forward(diff_bottom_vec_, diff_top_vec_);
}
// if necessary, compute the number of not-NaNs
int count = bottom[0]->count();
int num = bottom[0]->num();
FindNotNaNs<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, diffptr->gpu_data(), mask_.mutable_gpu_data());
cudaDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
// for backward
if (this->layer_param_.l1_loss_param().normalize_by_num_entries()) {
caffe_gpu_dot(count, mask_.gpu_data(), mask_.gpu_data(), &normalize_coeff_backward_);
normalize_coeff_backward_ /= mask_.channels();
} else {
normalize_coeff_backward_ = Dtype(count) / mask_.channels();
}
if(bottom.size() == 3) {
CombineMasked<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom[2]->gpu_data(), mask_.mutable_gpu_data());
cudaDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
}
// for forward
if (this->layer_param_.l1_loss_param().normalize_by_num_entries()) {
caffe_gpu_dot(count, mask_.gpu_data(), mask_.gpu_data(), &normalize_coeff_forward_);
normalize_coeff_forward_ /= mask_.channels();
} else {
caffe_gpu_dot(count, mask_.gpu_data(), mask_.gpu_data(), &normalize_coeff_forward_);
normalize_coeff_forward_ /= mask_.channels();
}
if(this->layer_param_.l1_loss_param().max_error() > 0) {
// find regions whose error is larger than max_error; these regions will not back-propagate
caffe_copy(bottom[0]->count(), mask_.gpu_data(),
temp_.mutable_gpu_data());
MaskMaxErrors<Dtype><<<CAFFE_GET_BLOCKS(temp_.count()), CAFFE_CUDA_NUM_THREADS>>>(
temp_.count(), diffptr->gpu_data(), temp_.mutable_gpu_data(), this->layer_param_.l1_loss_param().max_error());
}
if (this->layer_param_.l1_loss_param().l2_per_location()) {
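// Per-location L2 forward: zero the masked diffs, square them, sum across channels,
// take the square root, then reduce to a scalar via a dot product with the all-ones sign_ blob.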
// set masked (NaNs only) to zero
KillMasked<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, mask_.gpu_data(), diffptr->mutable_gpu_data());
cudaDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
square_layer_->Forward(diff_top_vec_, square_top_vec_);
sum_layer_->Forward(square_top_vec_, sum_top_vec_);
// Mask plateau in summed blob (only one channel):
if(this->layer_param_.l1_loss_param().plateau() > 0) {
float plateau_val_squared = this->layer_param_.l1_loss_param().plateau() * this->layer_param_.l1_loss_param().plateau();
MaskPlateauValuesInitial<Dtype><<<CAFFE_GET_BLOCKS(sum_output_.count()), CAFFE_CUDA_NUM_THREADS>>>(
sum_output_.count(), sum_output_.gpu_data(), plateau_l2_.mutable_gpu_data(), plateau_val_squared);
cudaDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
KillMasked<Dtype><<<CAFFE_GET_BLOCKS(sum_output_.count()), CAFFE_CUDA_NUM_THREADS>>>(
sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_data());
cudaDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
}
sqrt_layer_->Forward(sum_top_vec_, sqrt_top_vec_);
// Note sign_ is set to all ones in Reshape
caffe_gpu_dot(sqrt_output_.count(), sqrt_output_.gpu_data(), sign_.gpu_data(), &dot);
}
else {
// Mask plateau:
if(this->layer_param_.l1_loss_param().plateau() > 0) {
MaskPlateauValues<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, diffptr->gpu_data(), mask_.mutable_gpu_data(), this->layer_param_.l1_loss_param().plateau());
CUDA_POST_KERNEL_CHECK;
}
//mask_.print("MASK2");
// set masked (NaNs, plateau) to zero
KillMasked<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, mask_.gpu_data(), diffptr->mutable_gpu_data());
CUDA_POST_KERNEL_CHECK;
ComputeSign<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, diffptr->gpu_data(), sign_.mutable_gpu_data());
CUDA_POST_KERNEL_CHECK;
caffe_gpu_dot(count, diffptr->gpu_data(), sign_.gpu_data(), &dot);
}
if(normalize_coeff_forward_ == 0)
loss = 0;
else
loss = dot / normalize_coeff_forward_;
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
void L1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
{
bool prop_down = propagate_down[0];
if(bottom.size() > 1) prop_down |= propagate_down[1];
Blob<Dtype> *diffptr = diff_top_vec_[0];
if (prop_down) {
// calculate mask ratio
Dtype ratio = normalize_coeff_forward_ / bottom[0]->count();
Dtype alpha = 0;
if(normalize_coeff_backward_ != 0 )
alpha = top[0]->cpu_diff()[0] / normalize_coeff_backward_;
if( bottom.size() == 3 && ratio > this->layer_param_.l1_loss_param().ratio())
alpha = Dtype(0);
if (this->layer_param_.l1_loss_param().l2_per_location()) {
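// Per-location L2 backward: seed sqrt_output_'s diff with alpha * sign_, then propagate
// back through sqrt -> (optional plateau mask) -> sum -> square to reach the diff blob.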
vector<bool> prop_down(1,true);
caffe_gpu_axpby(sqrt_output_.count(), alpha, sign_.gpu_data(),
Dtype(0), sqrt_output_.mutable_gpu_diff());
sqrt_layer_->Backward(sqrt_top_vec_, prop_down, sum_top_vec_);
if(this->layer_param_.l1_loss_param().plateau() > 0) {
KillMasked<Dtype><<<CAFFE_GET_BLOCKS(sum_output_.count()), CAFFE_CUDA_NUM_THREADS>>>(
sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_diff());
cudaDeviceSynchronize();
CUDA_POST_KERNEL_CHECK;
}
sum_layer_->Backward(sum_top_vec_, prop_down, square_top_vec_);
square_layer_->Backward(square_top_vec_, prop_down, diff_top_vec_);
}
else {
caffe_gpu_axpby(diffptr->count(), alpha, sign_.gpu_data(),
Dtype(0), diffptr->mutable_gpu_diff());
}
// mask gradients
if(this->layer_param_.l1_loss_param().max_error() == 0) {
KillMasked<Dtype><<<CAFFE_GET_BLOCKS(diffptr->count()), CAFFE_CUDA_NUM_THREADS>>>(
diffptr->count(), mask_.gpu_data(), diffptr->mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
}else{
KillMasked<Dtype><<<CAFFE_GET_BLOCKS(diffptr->count()), CAFFE_CUDA_NUM_THREADS>>>(
diffptr->count(), temp_.gpu_data(), diffptr->mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
}
if(bottom.size() > 1) {
diff_layer_->Backward(diff_top_vec_, propagate_down, diff_bottom_vec_);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(L1LossLayer);
} // namespace caffe
|
decc1a429482d792a1fcafe8b05e512c166a3170.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS), whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
int offset_x = blockIdx.x * blockDim.x;
int offset_y = blockIdx.y * blockDim.y;
int x = offset_x + threadIdx.x;
int y = offset_y + threadIdx.y;
if (x >= numCols || y >= numRows){
return;
}
float weighted_sum = 0;
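// Accumulate the filter-weighted neighborhood sum; the min/max clamping below replicates
// edge pixels so out-of-bounds neighbors read the nearest in-bounds value.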
for(int i = 0; i< filterWidth; ++i){
for(int j = 0; j< filterWidth; ++j){
int row = min(max(0, y - filterWidth / 2 + i), numRows - 1);
int column = min(max(0, x - filterWidth / 2 + j), numCols - 1);
float value = inputChannel[row * numCols + column];
float weight = filter[i * filterWidth + j];
weighted_sum += (float)value * weight;
}
}
int idx = y * numCols + x;
outputChannel[idx] = (unsigned char)weighted_sum;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
int offset_x = blockIdx.x * blockDim.x;
int offset_y = blockIdx.y * blockDim.y;
int x = offset_x + threadIdx.x;
int y = offset_y + threadIdx.y;
if (x >= numCols || y >= numRows){
return;
}
int idx = y * numCols + x;
auto pixel = inputImageRGBA[idx];
redChannel[idx] = pixel.x;
greenChannel[idx] = pixel.y;
blueChannel[idx] = pixel.z;
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(32, 32);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize(numCols / blockSize.x + 1, numRows / blockSize.y + 1);
//TODO: Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue );
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red,
d_redBlurred,
numRows, numCols,
d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green,
d_greenBlurred,
numRows, numCols,
d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue,
d_blueBlurred,
numRows, numCols,
d_filter, filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
//also free the filter weights allocated in allocateMemoryAndCopyToGPU
checkCudaErrors(hipFree(d_filter));
}
| decc1a429482d792a1fcafe8b05e512c166a3170.cu | // Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS), whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
int offset_x = blockIdx.x * blockDim.x;
int offset_y = blockIdx.y * blockDim.y;
int x = offset_x + threadIdx.x;
int y = offset_y + threadIdx.y;
if (x >= numCols || y >= numRows){
return;
}
float weighted_sum = 0;
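// Accumulate the filter-weighted neighborhood sum; the min/max clamping below replicates
// edge pixels so out-of-bounds neighbors read the nearest in-bounds value.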
for(int i = 0; i< filterWidth; ++i){
for(int j = 0; j< filterWidth; ++j){
int row = min(max(0, y - filterWidth / 2 + i), numRows - 1);
int column = min(max(0, x - filterWidth / 2 + j), numCols - 1);
float value = inputChannel[row * numCols + column];
float weight = filter[i * filterWidth + j];
weighted_sum += (float)value * weight;
}
}
int idx = y * numCols + x;
outputChannel[idx] = (unsigned char)weighted_sum;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
int offset_x = blockIdx.x * blockDim.x;
int offset_y = blockIdx.y * blockDim.y;
int x = offset_x + threadIdx.x;
int y = offset_y + threadIdx.y;
if (x >= numCols || y >= numRows){
return;
}
int idx = y * numCols + x;
auto pixel = inputImageRGBA[idx];
redChannel[idx] = pixel.x;
greenChannel[idx] = pixel.y;
blueChannel[idx] = pixel.z;
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(32, 32);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize(numCols / blockSize.x + 1, numRows / blockSize.y + 1);
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue );
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize>>>(d_red,
d_redBlurred,
numRows, numCols,
d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_green,
d_greenBlurred,
numRows, numCols,
d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_blue,
d_blueBlurred,
numRows, numCols,
d_filter, filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
//also free the filter weights allocated in allocateMemoryAndCopyToGPU
checkCudaErrors(cudaFree(d_filter));
}
|
1b721f765af21a912404edbdede0db8220831afd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void printHello() {
printf("The Device says \"Hello World\"\n");
} | 1b721f765af21a912404edbdede0db8220831afd.cu | #include "includes.h"
__global__ void printHello() {
printf("The Device says \"Hello World\"\n");
} |
b8226e782627a51ae0d1198fff74bcadaa7f0e1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../cuda_utils.h"
#include "knnquery_cuda_kernel.h"
// input: xyz (b, n, 3) new_xyz (b, m, 3)
// output: idx (b, m, nsample) dist2 (b, m, nsample)
__global__ void knnquery_cuda_kernel(int b, int n, int m, int nsample, const float *__restrict__ xyz, const float *__restrict__ new_xyz, int *__restrict__ idx, float *__restrict__ dist2) {
int bs_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || pt_idx >= m) return;
new_xyz += bs_idx * m * 3 + pt_idx * 3;
xyz += bs_idx * n * 3;
idx += bs_idx * m * nsample + pt_idx * nsample;
float new_x = new_xyz[0];
float new_y = new_xyz[1];
float new_z = new_xyz[2];
//double* best = new double[nsample];
//int* besti = new int[nsample];
double best[200];
int besti[200];
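// Fixed-size per-thread buffers for the running k-nearest list; assumes nsample <= 200.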
for(int i = 0; i < nsample; i++){
best[i] = 1e40;
besti[i] = 0;
}
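// Scan all n points and insertion-sort each squared distance into best[], so that
// best[0..nsample-1] always holds the nsample closest points seen so far.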
for(int k = 0; k < n; k++){
float x = xyz[k * 3 + 0];
float y = xyz[k * 3 + 1];
float z = xyz[k * 3 + 2];
float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z);
for(int j = 0; j < nsample; j++){
if(d2 < best[j]){
for(int i = nsample - 1; i > j; i--){
best[i] = best[i - 1];
besti[i] = besti[i - 1];
}
best[j] = d2;
besti[j] = k;
break;
}
}
}
for(int i = 0; i < nsample; i++){
idx[i] = besti[i];
dist2[i] = best[i];
}
//delete []best;
//delete []besti;
}
void knnquery_cuda_launcher(int b, int n, int m, int nsample, const float *xyz, const float *new_xyz, int *idx, float *dist2, hipStream_t stream) {
// param new_xyz: (B, m, 3)
// param xyz: (B, n, 3)
// param idx: (B, m, nsample)
hipError_t err;
dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( knnquery_cuda_kernel), dim3(blocks), dim3(threads), 0, stream, b, n, m, nsample, xyz, new_xyz, idx, dist2);
// hipDeviceSynchronize(); // for using printf in kernel function
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
} | b8226e782627a51ae0d1198fff74bcadaa7f0e1c.cu | #include "../cuda_utils.h"
#include "knnquery_cuda_kernel.h"
// input: xyz (b, n, 3) new_xyz (b, m, 3)
// output: idx (b, m, nsample) dist2 (b, m, nsample)
__global__ void knnquery_cuda_kernel(int b, int n, int m, int nsample, const float *__restrict__ xyz, const float *__restrict__ new_xyz, int *__restrict__ idx, float *__restrict__ dist2) {
int bs_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || pt_idx >= m) return;
new_xyz += bs_idx * m * 3 + pt_idx * 3;
xyz += bs_idx * n * 3;
idx += bs_idx * m * nsample + pt_idx * nsample;
float new_x = new_xyz[0];
float new_y = new_xyz[1];
float new_z = new_xyz[2];
//double* best = new double[nsample];
//int* besti = new int[nsample];
double best[200];
int besti[200];
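// Fixed-size per-thread buffers for the running k-nearest list; assumes nsample <= 200.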
for(int i = 0; i < nsample; i++){
best[i] = 1e40;
besti[i] = 0;
}
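// Scan all n points and insertion-sort each squared distance into best[], so that
// best[0..nsample-1] always holds the nsample closest points seen so far.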
for(int k = 0; k < n; k++){
float x = xyz[k * 3 + 0];
float y = xyz[k * 3 + 1];
float z = xyz[k * 3 + 2];
float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z);
for(int j = 0; j < nsample; j++){
if(d2 < best[j]){
for(int i = nsample - 1; i > j; i--){
best[i] = best[i - 1];
besti[i] = besti[i - 1];
}
best[j] = d2;
besti[j] = k;
break;
}
}
}
for(int i = 0; i < nsample; i++){
idx[i] = besti[i];
dist2[i] = best[i];
}
//delete []best;
//delete []besti;
}
void knnquery_cuda_launcher(int b, int n, int m, int nsample, const float *xyz, const float *new_xyz, int *idx, float *dist2, cudaStream_t stream) {
// param new_xyz: (B, m, 3)
// param xyz: (B, n, 3)
// param idx: (B, m, nsample)
cudaError_t err;
dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
knnquery_cuda_kernel<<<blocks, threads, 0, stream>>>(b, n, m, nsample, xyz, new_xyz, idx, dist2);
// cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
} |
09a1d401c40a51e48c9343973635d22703f3b7b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define SEP_LINE_LENGTH 20
typedef struct gridTopology
{
dim3 blockSize;
dim3 gridSize;
} gridTopology;
typedef struct pixelCoords
{
int x, y;
} pixelCoords;
gridTopology initGridTopology2D(int r, int c);
void gridDataReport(gridTopology t, int nRows, int nCols);
void printLineOf(char c);
__device__ int validThread(gridTopology t);
__global__ void testInvalidThreads(int r, int c);
int main(int argc, char *argv[])
{
int nRows = 10, nCols = 10;
gridTopology t = initGridTopology2D(nRows, nCols);
// gridDataReport(t, nRows, nCols);
hipLaunchKernelGGL(( testInvalidThreads), dim3(t.gridSize), dim3(t.blockSize), 0, 0, nRows, nCols);
hipDeviceSynchronize(); // flush the printf of threads !
// hipDeviceReset();
return EXIT_SUCCESS;
}
gridTopology initGridTopology2D(int nRows, int nCols)
{
hipDeviceProp_t deviceProp;
int dev = 0;
hipGetDeviceProperties(&deviceProp, dev);
int maxGridSizeX = deviceProp.maxGridSize[0];
int maxGridSizeY = deviceProp.maxGridSize[1];
int maxThreadsX = deviceProp.maxThreadsDim[0];
int maxThreadsY = deviceProp.maxThreadsDim[1];
gridTopology t;
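// Use a square thread block whose side is sqrt(maxThreadsPerBlock), e.g. 32x32 when the limit is 1024.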
int blockSizeX = sqrt(deviceProp.maxThreadsPerBlock);
if (blockSizeX > maxThreadsX) perror("! Error");
int blockSizeY = blockSizeX;
if (blockSizeY > maxThreadsY) { }
int nBlocksX = (nCols + blockSizeX - 1) / blockSizeX;
if (nBlocksX > maxGridSizeX) { }
int nBlocksY = (nRows + blockSizeY - 1) / blockSizeY;
if (nBlocksY > maxGridSizeY) { }
// Every dimension of a launch configuration must be at least 1; passing 0 makes the launch fail.
t.blockSize = dim3(blockSizeX, blockSizeY, 1);
t.gridSize = dim3(nBlocksX, nBlocksY, 1);
return t;
}
void gridDataReport(gridTopology t, int nRows, int nCols)
{
printLineOf('*');
printf("Sizes of blocks: x:%d, y:%d\n", t.blockSize.x,
t.blockSize.y);
printf("Sizes of grid: x:%d, y:%d\n", t.gridSize.x,
t.gridSize.y);
printf("nRows: %d, nCols: %d; total pixels: %d\n",
nRows, nCols, nRows * nCols);
printf("number of threads: %d\n",
t.blockSize.x * t.blockSize.y //
* t.gridSize.x * t.gridSize.y);
printf("number of threads - 1 block in x - 1 block in y: %d\n",
t.blockSize.x * t.blockSize.y //
* (t.gridSize.x - 1) * (t.gridSize.y - 1) );
printLineOf('*');
}
__device__ int validThread(pixelCoords p)
{
// Checks if the thread should compute or not,
// according to its position in the CUDA grid,
// with respect to the original 2D matrix size - e.g. an image.
return (p.x > -1 && p.y > -1) ? 1 : 0;
}
__device__ pixelCoords computeThread2DCoordinates(int r, int c)
{
// Each function that uses this function MUST check
// that both pixelX and pixelY are != -1 .
int pixelX = blockIdx.x * blockDim.x + threadIdx.x;
int pixelY = blockIdx.y * blockDim.y + threadIdx.y;
// NOTE: row and column of the matrix have +1 w.r.t. thread
// x and y coordinates !
if (pixelX >= c) pixelX = -1; // this thread is out of bounds
if (pixelY >= r) pixelY = -1; // this thread is out of bounds
pixelCoords p = {pixelX, pixelY};
return p;
}
__global__ void testInvalidThreads(int r, int c)
{
printf("ok\n");
// pixelCoords p = computeThread2DCoordinates(r, c);
// if (!validThread(p))
// printf("Thread x:%d y:%d has nothing to do here.\n",
// p.x, p.y);
// else printf("ciao\n");
}
void printLineOf(const char c)
{
int i;
for (i = 0; i < SEP_LINE_LENGTH; i++)
{
printf(" %c", c);
}
printf("\n");
}
| 09a1d401c40a51e48c9343973635d22703f3b7b3.cu | #include <stdio.h>
#define SEP_LINE_LENGTH 20
typedef struct gridTopology
{
dim3 blockSize;
dim3 gridSize;
} gridTopology;
typedef struct pixelCoords
{
int x, y;
} pixelCoords;
gridTopology initGridTopology2D(int r, int c);
void gridDataReport(gridTopology t, int nRows, int nCols);
void printLineOf(char c);
__device__ int validThread(gridTopology t);
__global__ void testInvalidThreads(int r, int c);
int main(int argc, char *argv[])
{
int nRows = 10, nCols = 10;
gridTopology t = initGridTopology2D(nRows, nCols);
// gridDataReport(t, nRows, nCols);
testInvalidThreads<<<t.gridSize, t.blockSize>>>(nRows, nCols);
cudaDeviceSynchronize(); // flush the printf of threads !
// cudaDeviceReset();
return EXIT_SUCCESS;
}
gridTopology initGridTopology2D(int nRows, int nCols)
{
cudaDeviceProp deviceProp;
int dev = 0;
cudaGetDeviceProperties(&deviceProp, dev);
int maxGridSizeX = deviceProp.maxGridSize[0];
int maxGridSizeY = deviceProp.maxGridSize[1];
int maxThreadsX = deviceProp.maxThreadsDim[0];
int maxThreadsY = deviceProp.maxThreadsDim[1];
gridTopology t;
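// Use a square thread block whose side is sqrt(maxThreadsPerBlock), e.g. 32x32 when the limit is 1024.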
int blockSizeX = sqrt(deviceProp.maxThreadsPerBlock);
if (blockSizeX > maxThreadsX) perror("! Error");
int blockSizeY = blockSizeX;
if (blockSizeY > maxThreadsY) { }
int nBlocksX = (nCols + blockSizeX - 1) / blockSizeX;
if (nBlocksX > maxGridSizeX) { }
int nBlocksY = (nRows + blockSizeY - 1) / blockSizeY;
if (nBlocksY > maxGridSizeY) { }
// Every dimension of a launch configuration must be at least 1; passing 0 makes the launch fail.
t.blockSize = dim3(blockSizeX, blockSizeY, 1);
t.gridSize = dim3(nBlocksX, nBlocksY, 1);
return t;
}
void gridDataReport(gridTopology t, int nRows, int nCols)
{
printLineOf('*');
printf("Sizes of blocks: x:%d, y:%d\n", t.blockSize.x,
t.blockSize.y);
printf("Sizes of grid: x:%d, y:%d\n", t.gridSize.x,
t.gridSize.y);
printf("nRows: %d, nCols: %d; total pixels: %d\n",
nRows, nCols, nRows * nCols);
printf("number of threads: %d\n",
t.blockSize.x * t.blockSize.y //
* t.gridSize.x * t.gridSize.y);
printf("number of threads - 1 block in x - 1 block in y: %d\n",
t.blockSize.x * t.blockSize.y //
* (t.gridSize.x - 1) * (t.gridSize.y - 1) );
printLineOf('*');
}
__device__ int validThread(pixelCoords p)
{
// Checks if the thread should compute or not,
// according to its position in the CUDA grid,
// with respect to the original 2D matrix size - e.g. an image.
return (p.x > -1 && p.y > -1) ? 1 : 0;
}
__device__ pixelCoords computeThread2DCoordinates(int r, int c)
{
// Each function that uses this function MUST check
// that both pixelX and pixelY are != -1 .
int pixelX = blockIdx.x * blockDim.x + threadIdx.x;
int pixelY = blockIdx.y * blockDim.y + threadIdx.y;
// NOTE: row and column of the matrix have +1 w.r.t. thread
// x and y coordinates !
if (pixelX >= c) pixelX = -1; // this thread is out of bounds
if (pixelY >= r) pixelY = -1; // this thread is out of bounds
pixelCoords p = {pixelX, pixelY};
return p;
}
__global__ void testInvalidThreads(int r, int c)
{
printf("ok\n");
// pixelCoords p = computeThread2DCoordinates(r, c);
// if (!validThread(p))
// printf("Thread x:%d y:%d has nothing to do here.\n",
// p.x, p.y);
// else printf("ciao\n");
}
void printLineOf(const char c)
{
int i;
for (i = 0; i < SEP_LINE_LENGTH; i++)
{
printf(" %c", c);
}
printf("\n");
}
|
ca12720029abbaaf46e7126d6cd233296cfead4b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void fillPartitionLabelKernel(int size, int *coarseAggregate, int *fineAggregateSort, int *partitionLabel)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < size)
{
partitionLabel[idx] = coarseAggregate[ fineAggregateSort[idx] ];
}
} | ca12720029abbaaf46e7126d6cd233296cfead4b.cu | #include "includes.h"
__global__ void fillPartitionLabelKernel(int size, int *coarseAggregate, int *fineAggregateSort, int *partitionLabel)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < size)
{
partitionLabel[idx] = coarseAggregate[ fineAggregateSort[idx] ];
}
} |
86ea71d91e5a0da64f5f4e16c5c6a110a3088cc1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/pixelwise_similarity_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
}
}
template <typename Dtype>
void PixelwiseSimilarityLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
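// The GPU forward currently just delegates to the CPU path; the device pointers
// fetched below are not used by any kernel launch.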
this->Forward_cpu(bottom, top);
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
template <typename Dtype>
__global__ void ReLUBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * negative_slope);
}
}
template <typename Dtype>
void PixelwiseSimilarityLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
this->Backward_cpu(top, propagate_down, bottom);
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PixelwiseSimilarityLayer);
} // namespace caffe
| 86ea71d91e5a0da64f5f4e16c5c6a110a3088cc1.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/pixelwise_similarity_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
}
}
template <typename Dtype>
void PixelwiseSimilarityLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
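// The GPU forward currently just delegates to the CPU path; the device pointers
// fetched below are not used by any kernel launch.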
this->Forward_cpu(bottom, top);
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
template <typename Dtype>
__global__ void ReLUBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * negative_slope);
}
}
template <typename Dtype>
void PixelwiseSimilarityLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
this->Backward_cpu(top, propagate_down, bottom);
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PixelwiseSimilarityLayer);
} // namespace caffe
|
92166cfacaf912fe37cf73b27144829bb04a92b2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "histColorsKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *histColors = NULL;
hipMalloc(&histColors, XSIZE*YSIZE);
hiprandState_t *states = NULL;
hipMalloc(&states, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
histColorsKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, histColors,states);
hipDeviceSynchronize();
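// Warm-up: run the kernel 10 times before timing so one-off setup costs are excluded.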
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
histColorsKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, histColors,states);
}
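// Timed region: measure 1000 kernel launches with steady_clock (no final device
// synchronization) and report the result in microseconds.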
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
histColorsKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, histColors,states);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 92166cfacaf912fe37cf73b27144829bb04a92b2.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "histColorsKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *histColors = NULL;
cudaMalloc(&histColors, XSIZE*YSIZE);
curandState *states = NULL;
cudaMalloc(&states, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
histColorsKernel<<<gridBlock,threadBlock>>>(histColors,states);
cudaDeviceSynchronize();
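// Warm-up: run the kernel 10 times before timing so one-off setup costs are excluded.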
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
histColorsKernel<<<gridBlock,threadBlock>>>(histColors,states);
}
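// Timed region: measure 1000 kernel launches with steady_clock (no final device
// synchronization) and report the result in microseconds.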
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
histColorsKernel<<<gridBlock,threadBlock>>>(histColors,states);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
7a42cae925f38a6c4a7fa2156bc9d38199aa08c0.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/opencv.hpp>
#include <vector>
#include <cstring>
#include <gpu/gpu_img_transform_stream.cuh>
#include "gpu/gpu_utilities.cuh"
#include "common/utilities.hpp"
#include "gpu/gpu_img_transform.cuh"
int main(int argc, char **argv)
{
std::string img_out; // default values in utilities.hpp
std::string img_in;
GpuUtilMenuSelection menuSelection;
GpuUtilMenuSelection::initParameters(img_in, img_out, menuSelection, argc, argv);
GpuUtilExecutionInfo info;
cv::Mat m_in = cv::imread(img_in, cv::IMREAD_UNCHANGED);
unsigned char* rgb_out = nullptr, *rgb_in = nullptr;
hipHostMalloc(&rgb_in, 3 * m_in.rows * m_in.cols);
hipHostMalloc(&rgb_out, 3 * m_in.rows * m_in.cols);
memcpy(rgb_in, m_in.data, 3 * m_in.rows * m_in.cols);
cv::Mat m_in_aux(m_in.rows, m_in.cols, CV_8UC3, rgb_in);
cv::Mat m_out(m_in.rows, m_in.cols, CV_8UC3, rgb_out);
int (*fnc_exec) (cv::Mat&, cv::Mat&, GpuUtilExecutionInfo& );
// menuSelection.use_shared = true;
// menuSelection.nb_stream = 4;
if( menuSelection.nb_stream == 0) {
if (!menuSelection.use_shared)
fnc_exec = GpuImgTransform::execute;
else
fnc_exec = GpuImgTransform::executeSharedMemMode;
}else {
if( menuSelection.nb_stream > m_in.rows ) menuSelection.nb_stream = 1;
if (!menuSelection.use_shared)
fnc_exec = GpuImgTransformStream::execute;
else
fnc_exec = GpuImgTransformStream::executeSharedMemMode;
}
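// Apply each selected filter in sequence, swapping the input/output buffers between
// passes so the previous output becomes the next input.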
for( int i = 0; i < menuSelection.enabled_filters.size(); i++){
if( i > 0 ) swapPointers(&m_in_aux.data, &m_out.data);
EffectStyle filter = menuSelection.enabled_filters.at(i);
set_convolution_properties(info.conv_properties, filter);
int conv_mat_length = info.conv_properties.size * info.conv_properties.size;
char conv_mat[conv_mat_length];
info.conv_matrix = conv_mat;
info.nb_pass = menuSelection.nb_pass.at(i);
info.nb_streams = menuSelection.nb_stream;
info.block.x = menuSelection.block.dimX;
info.block.y = menuSelection.block.dimY;
copyReverse(conv_mat, filter, conv_mat_length);
(*fnc_exec)(m_in_aux, m_out, info );
}
cv::imwrite(img_out, m_out);
hipHostFree(rgb_in);
hipHostFree(rgb_out);
return 0;
}
| 7a42cae925f38a6c4a7fa2156bc9d38199aa08c0.cu | #include <opencv2/opencv.hpp>
#include <vector>
#include <cstring>
#include <gpu/gpu_img_transform_stream.cuh>
#include "gpu/gpu_utilities.cuh"
#include "common/utilities.hpp"
#include "gpu/gpu_img_transform.cuh"
int main(int argc, char **argv)
{
std::string img_out; // default values in utilities.hpp
std::string img_in;
GpuUtilMenuSelection menuSelection;
GpuUtilMenuSelection::initParameters(img_in, img_out, menuSelection, argc, argv);
GpuUtilExecutionInfo info;
cv::Mat m_in = cv::imread(img_in, cv::IMREAD_UNCHANGED);
unsigned char* rgb_out = nullptr, *rgb_in = nullptr;
cudaMallocHost(&rgb_in, 3 * m_in.rows * m_in.cols);
cudaMallocHost(&rgb_out, 3 * m_in.rows * m_in.cols);
memcpy(rgb_in, m_in.data, 3 * m_in.rows * m_in.cols);
cv::Mat m_in_aux(m_in.rows, m_in.cols, CV_8UC3, rgb_in);
cv::Mat m_out(m_in.rows, m_in.cols, CV_8UC3, rgb_out);
int (*fnc_exec) (cv::Mat&, cv::Mat&, GpuUtilExecutionInfo& );
// menuSelection.use_shared = true;
// menuSelection.nb_stream = 4;
if( menuSelection.nb_stream == 0) {
if (!menuSelection.use_shared)
fnc_exec = GpuImgTransform::execute;
else
fnc_exec = GpuImgTransform::executeSharedMemMode;
}else {
if( menuSelection.nb_stream > m_in.rows ) menuSelection.nb_stream = 1;
if (!menuSelection.use_shared)
fnc_exec = GpuImgTransformStream::execute;
else
fnc_exec = GpuImgTransformStream::executeSharedMemMode;
}
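// Apply each selected filter in sequence, swapping the input/output buffers between
// passes so the previous output becomes the next input.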
for( int i = 0; i < menuSelection.enabled_filters.size(); i++){
if( i > 0 ) swapPointers(&m_in_aux.data, &m_out.data);
EffectStyle filter = menuSelection.enabled_filters.at(i);
set_convolution_properties(info.conv_properties, filter);
int conv_mat_length = info.conv_properties.size * info.conv_properties.size;
char conv_mat[conv_mat_length];
info.conv_matrix = conv_mat;
info.nb_pass = menuSelection.nb_pass.at(i);
info.nb_streams = menuSelection.nb_stream;
info.block.x = menuSelection.block.dimX;
info.block.y = menuSelection.block.dimY;
copyReverse(conv_mat, filter, conv_mat_length);
(*fnc_exec)(m_in_aux, m_out, info );
}
cv::imwrite(img_out, m_out);
cudaFreeHost(rgb_in);
cudaFreeHost(rgb_out);
return 0;
}
|
b312293a44302deb0d70243bb32ada91320ae024.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
#include "time.h"
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
// Defining number of elements in Array
#define N 10000000
// Defining Kernel function for vector addition
__global__ void gpuAdd(int *d_a, int *d_b, int *d_c)
{
// Computing the global thread index (each block is launched with 1000 threads)
int tid = blockIdx.x * 1000 + threadIdx.x;
if (tid < N)
d_c[tid] = d_a[tid] + d_b[tid];
}
int main(void)
{
// Defining host arrays
int *h_a, *h_b, *h_c;
h_a = (int *)malloc(sizeof(int) * N);
h_b = (int *)malloc(sizeof(int) * N);
h_c = (int *)malloc(sizeof(int) * N);
// Defining device pointers
int *d_a, *d_b, *d_c;
// Initializing two arrays for addition
for (int i = 0; i < N; i++)
{
h_a[i] = i + 1;
h_b[i] = i - 1;
}
// Allocate the memory
hipMalloc((void **)&d_a, sizeof(int) * N);
hipMalloc((void **)&d_b, sizeof(int) * N);
hipMalloc((void **)&d_c, sizeof(int) * N);
// Copy input arrays from host to device memory
hipMemcpy(d_a, h_a, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, N * sizeof(int), hipMemcpyHostToDevice);
// Start Time
clock_t start_h = clock();
// Calling the kernel with 10000 blocks of 1000 threads each, passing device pointers as parameters
hipLaunchKernelGGL(( gpuAdd) , dim3(10000), dim3(1000) , 0, 0, d_a, d_b, d_c);
hipDeviceSynchronize();
// End Time
clock_t end_h = clock();
// Copy result back to host memory from device memory
hipMemcpy(h_c, d_c, N * sizeof(int), hipMemcpyDeviceToHost);
printf("Vector addition on GPU \n");
// Execute Time
double time_h = (double)(end_h - start_h) / CLOCKS_PER_SEC;
printf("N = %d \n", N);
printf("Execute time: %f seconds \n", time_h);
// Printing result on console
for (int i = 9999990; i < N; i++)
{
printf("The sum of %d element is %d + %d = %d\n", i, h_a[i], h_b[i], h_c[i]);
}
// Free up memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
system("pause");
return 0;
}
| b312293a44302deb0d70243bb32ada91320ae024.cu | #include "stdio.h"
#include "time.h"
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
// Defining number of elements in Array
#define N 10000000
// Defining Kernel function for vector addition
__global__ void gpuAdd(int *d_a, int *d_b, int *d_c)
{
	// Compute the global thread index (block index * 1000 threads per block + thread index within the block)
int tid = blockIdx.x * 1000 + threadIdx.x;
if (tid < N)
d_c[tid] = d_a[tid] + d_b[tid];
}
int main(void)
{
// Defining host arrays
int *h_a, *h_b, *h_c;
h_a = (int *)malloc(sizeof(int) * N);
h_b = (int *)malloc(sizeof(int) * N);
h_c = (int *)malloc(sizeof(int) * N);
// Defining device pointers
int *d_a, *d_b, *d_c;
// Initializing two arrays for addition
for (int i = 0; i < N; i++)
{
h_a[i] = i + 1;
h_b[i] = i - 1;
}
// Allocate the memory
cudaMalloc((void **)&d_a, sizeof(int) * N);
cudaMalloc((void **)&d_b, sizeof(int) * N);
cudaMalloc((void **)&d_c, sizeof(int) * N);
// Copy input arrays from host to device memory
cudaMemcpy(d_a, h_a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, N * sizeof(int), cudaMemcpyHostToDevice);
// Start Time
clock_t start_h = clock();
	// Calling the kernel with 10000 blocks of 1000 threads each (one thread per element), passing device pointers as parameters
gpuAdd <<<10000, 1000 >>>(d_a, d_b, d_c);
cudaThreadSynchronize();
// End Time
clock_t end_h = clock();
// Copy result back to host memory from device memory
cudaMemcpy(h_c, d_c, N * sizeof(int), cudaMemcpyDeviceToHost);
printf("Vector addition on GPU \n");
// Execute Time
double time_h = (double)(end_h - start_h) / CLOCKS_PER_SEC;
printf("N = %d \n", N);
printf("Execute time: %f seconds \n", time_h);
// Printing result on console
for (int i = 9999990; i < N; i++)
{
printf("The sum of %d element is %d + %d = %d\n", i, h_a[i], h_b[i], h_c[i]);
}
// Free up memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
system("pause");
return 0;
}
|
08cbb6d190fe39cb978a8b7097a29186233221d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file gdf-csr.cu code to convert a GDF matrix into a CSR
*
*/
#include "cudf.h"
#include "utilities/error_utils.h"
#include "rmm/rmm.h"
#include "rmm/thrust_rmm_allocator.h"
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
using namespace std;
//--- all the private functions
template<typename T>
gdf_error runConverter(gdf_column **gdfData, csr_gdf *csrReturn, gdf_size_type * offsets);
//--- private CUDA functions / kernels
template<typename T>
__global__ void cudaCreateCSR(void *data, gdf_valid_type *valid, gdf_dtype dtype, int colID, T *A, int64_t *JA, gdf_size_type *offsets, gdf_size_type numRows);
__global__ void determineValidRecCount(gdf_valid_type *validArray, gdf_size_type numRows, gdf_size_type numCol, gdf_size_type * offset);
template<typename T>
__device__ T convertDataElement(gdf_column *gdf, int idx, gdf_dtype dtype);
__device__ int whichBitmapCSR(int record) { return (record/8); }
__device__ int whichBitCSR(int bit) { return (bit % 8); }
__device__ int checkBitCSR(gdf_valid_type data, int bit) {
gdf_valid_type bitMask[8] = {1, 2, 4, 8, 16, 32, 64, 128};
return (data & bitMask[bit]);
}
//
//------------------------------------------------------------
//
/*
* Convert a Dense GDF into a CSR GDF
*
* Restrictions: All columns need to be of the same length
*/
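/*
 * A small worked example of the CSR layout this conversion aims to produce
 * (values chosen purely for illustration): given a 3-row, 2-column GDF
 *
 *          col0  col1
 *   row0     7     -        ('-' marks a null entry)
 *   row1     -     5
 *   row2     2     9
 *
 * the output would be nnz = 4 with
 *   A  = [7, 5, 2, 9]    element values, grouped by row (columns in order within a row)
 *   JA = [0, 1, 0, 1]    column index of each element
 *   IA = [0, 1, 2, 4]    offset of the first element of each row (exclusive scan of the row counts)
 */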
/**
* @brief convert a GDF into a CSR
*
* Take a matrix in GDF format and convert it into a CSR. The column major matrix needs to have every column defined.
 * Passing in a COO dataset will be treated as a two column matrix
*
* @param[in] gdfData the ordered list of columns
* @param[in] numCol the number of columns in the gdfData array
*
* @param[out] csrReturn a pointer to the returned data structure
*
* @return gdf_error code
*/
gdf_error gdf_to_csr(gdf_column **gdfData, int numCol, csr_gdf *csrReturn) {
int64_t numNull = 0;
int64_t nnz = 0;
gdf_size_type numRows = gdfData[0]->size;
gdf_dtype dType = gdf_dtype::GDF_invalid; // the data type to make the CSR element array (A)
/**
* Currently the gdf_dtype enum is arranged based on data size, as long as it stays that way the enum values can be
* exploited by just picking the largest enum value
*
* While looping, also get the number of null values (this will work one day)
*/
for ( int x =0; x < numCol; x++) {
if( gdfData[x]->dtype > dType)
dType = gdfData[x]->dtype;
numNull += gdfData[x]->null_count;
}
if (dType == gdf_dtype::GDF_invalid || dType == gdf_dtype::GDF_STRING )
return gdf_error::GDF_UNSUPPORTED_DTYPE;
// the number of valid elements is simply the max number of possible elements (rows * columns) minus the number of nulls
// the current problem is that algorithms are not setting null_count;
// gdf_size_type is 32bits (int) but the total size could be larger than an int, so use a long
nnz = (numRows * numCol) - numNull;
// Allocate space for the offset - this will eventually be IA - dtype is long since the sum of all column elements could be larger than int32
gdf_size_type * offsets;
RMM_TRY(RMM_ALLOC((void**)&offsets, (numRows + 2) * sizeof(int64_t), 0)); // TODO: non-default stream?
CUDA_TRY(hipMemset(offsets, 0, ( sizeof(int64_t) * (numRows + 2) ) ));
// do a pass over each columns, and have each column updates the row count
//-- threads and blocks
int threads = 1024;
int blocks = (numRows + threads - 1) / threads;
for ( int x = 0; x < numCol; x++ ) {
hipLaunchKernelGGL(( determineValidRecCount), dim3(blocks), dim3(threads), 0, 0, gdfData[x]->valid, numRows, numCol, offsets);
}
rmm_temp_allocator allocator(0); // TODO: non-default stream?
//--------------------------------------------------------------------------------------
// Now do an exclusive scan to compute the offsets for where to write data
thrust::exclusive_scan(thrust::hip::par(allocator).on(0), offsets, (offsets + numRows + 1), offsets);
//--------------------------------------------------------------------------------------
// get the number of elements - NNZ, this is the last item in the array
CUDA_TRY( hipMemcpy((void *)&nnz, (void *)&offsets[numRows], sizeof(int64_t), hipMemcpyDeviceToHost) );
if ( nnz == 0)
return GDF_CUDA_ERROR;
//--------------------------------------------------------------------------------------
// now start creating output data
size_t * IA;
RMM_TRY(RMM_ALLOC((void**)&IA, (numRows + 2) * sizeof(gdf_size_type), 0));
CUDA_TRY(hipMemcpy(IA, offsets, ( sizeof(gdf_size_type) * (numRows + 2) ), hipMemcpyDeviceToDevice) );
int64_t * JA;
RMM_TRY( RMM_ALLOC((void**)&JA, (sizeof(int64_t) * nnz), 0));
//----------------------------------------------------------------------------------
// Now just missing A and the moving of data
csrReturn->dtype = dType;
csrReturn->rows = numRows;
csrReturn->cols = numCol;
csrReturn->dtype = dType;
csrReturn->JA = JA;
csrReturn->IA = IA;
csrReturn->nnz = nnz;
// Start processing based on data type
gdf_error status = GDF_SUCCESS;
switch(dType) {
case gdf_dtype::GDF_INT8:
status = runConverter<int8_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT16:
status = runConverter<int16_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT32:
status = runConverter<int32_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT64:
status = runConverter<int64_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_FLOAT32:
status = runConverter<float>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_FLOAT64:
status = runConverter<double>(gdfData, csrReturn, offsets);
break;
default:
RMM_TRY(RMM_FREE(IA, 0));
RMM_TRY(RMM_FREE(JA, 0));
RMM_TRY(RMM_FREE(offsets, 0));
return GDF_UNSUPPORTED_DTYPE;
}
RMM_TRY(RMM_FREE(offsets, 0));
return status;
}
template<typename T>
gdf_error runConverter(gdf_column **gdfData, csr_gdf *csrReturn, gdf_size_type * offsets) {
gdf_size_type numCols = csrReturn->cols;
gdf_size_type numRows = csrReturn->rows;
//-- threads and blocks
int threads = 1024;
if ( numRows < 100 ) {
threads = 64;
} else if (numRows < 256) {
threads = 128;
} else if ( numRows < 512) {
threads = 256;
} else if ( numRows < 1024) {
threads = 512;
}
int blocks = (numRows + threads - 1) / threads;
T * A;
RMM_TRY(RMM_ALLOC((void**)&A, (sizeof(T) * csrReturn->nnz), 0));
CUDA_TRY(hipMemset(A, 0, (sizeof(T) * csrReturn->nnz)));
// Now start moving the data and creating the CSR
for ( gdf_size_type colId = 0; colId < numCols; colId++ ) {
gdf_column *gdf = gdfData[colId];
hipLaunchKernelGGL(( cudaCreateCSR<T>), dim3(blocks), dim3(threads), 0, 0, gdf->data, gdf->valid, gdf->dtype, colId, A, csrReturn->JA, offsets, numRows);
CUDA_CHECK_LAST();
}
csrReturn->A = A;
return gdf_error::GDF_SUCCESS;
}
/*
 * Move data over into CSR and possibly convert the format
*/
template<typename T>
__global__ void cudaCreateCSR(
void *data, gdf_valid_type *valid, gdf_dtype dtype, int colId,
T *A, int64_t *JA, gdf_size_type *offsets, gdf_size_type numRows)
{
	int tid = threadIdx.x + (blockDim.x * blockIdx.x); // get the thread ID, which is also the row number
if ( tid >= numRows)
return;
int bitmapIdx = whichBitmapCSR(tid); // which bitmap
int bitIdx = whichBitCSR(tid); // which bit - over an 8-bit index
gdf_valid_type bitmap = valid[bitmapIdx];
if ( checkBitCSR( bitmap, bitIdx) ) {
gdf_size_type offsetIdx = offsets[tid]; // where should this thread start writing data
A[offsetIdx] = convertDataElement<T>(data, tid, dtype);
JA[offsetIdx] = colId;
++offsets[tid];
}
}
/*
 * Compute the number of valid entries per row - a row spans multiple gdf_columns -
* There is one thread running per row, so just compute the sum for this row.
*
 * the number of elements in a valid array is actually ceil(numRows / 8) since it is a bitmap; the total number of bits checked is equal to numRows
*
*/
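/*
 * For example, with numRows = 10 the valid bitmap spans ceil(10 / 8) = 2 bytes;
 * row 9 is checked against byte 9 / 8 = 1, bit 9 % 8 = 1 (see whichBitmapCSR / whichBitCSR above).
 */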
__global__ void determineValidRecCount(gdf_valid_type *valid, gdf_size_type numRows, gdf_size_type numCol, gdf_size_type * offset) {
	int tid = threadIdx.x + (blockDim.x * blockIdx.x); // get the thread ID, which is also the row number
if ( tid >= numRows)
return;
int bitmapIdx = whichBitmapCSR(tid); // want the floor of the divide
int bitIdx = whichBitCSR(tid); // which bit - over an 8-bit index
gdf_valid_type bitmap = valid[bitmapIdx];
if (checkBitCSR( bitmap, bitIdx) )
++offset[tid];
}
/**
* Convert the data element into a common format
*/
template<typename T>
__device__ T convertDataElement(void *data, int tid, gdf_dtype dtype) {
T answer;
switch(dtype) {
case gdf_dtype::GDF_INT8: {
int8_t *a = (int8_t *)data;
answer = (T)(a[tid]);
break;
}
case gdf_dtype::GDF_INT16: {
int16_t *b = (int16_t *)data;
answer = (T)(b[tid]);
break;
}
case gdf_dtype::GDF_INT32: {
int32_t *c = (int32_t *)data;
answer = (T)(c[tid]);
break;
}
case gdf_dtype::GDF_INT64: {
int64_t *d = (int64_t *)data;
answer = (T)(d[tid]);
break;
}
case gdf_dtype::GDF_FLOAT32: {
float *e = (float *)data;
answer = (T)(e[tid]);
break;
}
case gdf_dtype::GDF_FLOAT64: {
double *f = (double *)data;
answer = (T)(f[tid]);
break;
}
}
return answer;
}
| 08cbb6d190fe39cb978a8b7097a29186233221d7.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file gdf-csr.cu code to convert a GDF matrix into a CSR
*
*/
#include "cudf.h"
#include "utilities/error_utils.h"
#include "rmm/rmm.h"
#include "rmm/thrust_rmm_allocator.h"
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
using namespace std;
//--- all the private functions
template<typename T>
gdf_error runConverter(gdf_column **gdfData, csr_gdf *csrReturn, gdf_size_type * offsets);
//--- private CUDA functions / kernels
template<typename T>
__global__ void cudaCreateCSR(void *data, gdf_valid_type *valid, gdf_dtype dtype, int colID, T *A, int64_t *JA, gdf_size_type *offsets, gdf_size_type numRows);
__global__ void determineValidRecCount(gdf_valid_type *validArray, gdf_size_type numRows, gdf_size_type numCol, gdf_size_type * offset);
template<typename T>
__device__ T convertDataElement(gdf_column *gdf, int idx, gdf_dtype dtype);
__device__ int whichBitmapCSR(int record) { return (record/8); }
__device__ int whichBitCSR(int bit) { return (bit % 8); }
__device__ int checkBitCSR(gdf_valid_type data, int bit) {
gdf_valid_type bitMask[8] = {1, 2, 4, 8, 16, 32, 64, 128};
return (data & bitMask[bit]);
}
//
//------------------------------------------------------------
//
/*
* Convert a Dense GDF into a CSR GDF
*
* Restrictions: All columns need to be of the same length
*/
/**
* @brief convert a GDF into a CSR
*
* Take a matrix in GDF format and convert it into a CSR. The column major matrix needs to have every column defined.
 * Passing in a COO dataset will be treated as a two column matrix
*
* @param[in] gdfData the ordered list of columns
* @param[in] numCol the number of columns in the gdfData array
*
* @param[out] csrReturn a pointer to the returned data structure
*
* @return gdf_error code
*/
gdf_error gdf_to_csr(gdf_column **gdfData, int numCol, csr_gdf *csrReturn) {
int64_t numNull = 0;
int64_t nnz = 0;
gdf_size_type numRows = gdfData[0]->size;
gdf_dtype dType = gdf_dtype::GDF_invalid; // the data type to make the CSR element array (A)
/**
* Currently the gdf_dtype enum is arranged based on data size, as long as it stays that way the enum values can be
* exploited by just picking the largest enum value
*
* While looping, also get the number of null values (this will work one day)
*/
for ( int x =0; x < numCol; x++) {
if( gdfData[x]->dtype > dType)
dType = gdfData[x]->dtype;
numNull += gdfData[x]->null_count;
}
if (dType == gdf_dtype::GDF_invalid || dType == gdf_dtype::GDF_STRING )
return gdf_error::GDF_UNSUPPORTED_DTYPE;
// the number of valid elements is simply the max number of possible elements (rows * columns) minus the number of nulls
// the current problem is that algorithms are not setting null_count;
// gdf_size_type is 32bits (int) but the total size could be larger than an int, so use a long
nnz = (numRows * numCol) - numNull;
// Allocate space for the offset - this will eventually be IA - dtype is long since the sum of all column elements could be larger than int32
gdf_size_type * offsets;
RMM_TRY(RMM_ALLOC((void**)&offsets, (numRows + 2) * sizeof(int64_t), 0)); // TODO: non-default stream?
CUDA_TRY(cudaMemset(offsets, 0, ( sizeof(int64_t) * (numRows + 2) ) ));
// do a pass over each columns, and have each column updates the row count
//-- threads and blocks
int threads = 1024;
int blocks = (numRows + threads - 1) / threads;
for ( int x = 0; x < numCol; x++ ) {
determineValidRecCount<<<blocks, threads>>>(gdfData[x]->valid, numRows, numCol, offsets);
}
rmm_temp_allocator allocator(0); // TODO: non-default stream?
//--------------------------------------------------------------------------------------
// Now do an exclusive scan to compute the offsets for where to write data
thrust::exclusive_scan(thrust::cuda::par(allocator).on(0), offsets, (offsets + numRows + 1), offsets);
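    // For example, if the per-row valid counts in `offsets` were [1, 1, 2, 0, ...], the
    // exclusive scan rewrites them in place as [0, 1, 2, 4, 4, ...]: entry r is where row r
    // starts writing into A/JA, and entry numRows holds the total element count (nnz).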
//--------------------------------------------------------------------------------------
// get the number of elements - NNZ, this is the last item in the array
CUDA_TRY( cudaMemcpy((void *)&nnz, (void *)&offsets[numRows], sizeof(int64_t), cudaMemcpyDeviceToHost) );
if ( nnz == 0)
return GDF_CUDA_ERROR;
//--------------------------------------------------------------------------------------
// now start creating output data
size_t * IA;
RMM_TRY(RMM_ALLOC((void**)&IA, (numRows + 2) * sizeof(gdf_size_type), 0));
CUDA_TRY(cudaMemcpy(IA, offsets, ( sizeof(gdf_size_type) * (numRows + 2) ), cudaMemcpyDeviceToDevice) );
int64_t * JA;
RMM_TRY( RMM_ALLOC((void**)&JA, (sizeof(int64_t) * nnz), 0));
//----------------------------------------------------------------------------------
// Now just missing A and the moving of data
csrReturn->dtype = dType;
csrReturn->rows = numRows;
csrReturn->cols = numCol;
csrReturn->dtype = dType;
csrReturn->JA = JA;
csrReturn->IA = IA;
csrReturn->nnz = nnz;
// Start processing based on data type
gdf_error status = GDF_SUCCESS;
switch(dType) {
case gdf_dtype::GDF_INT8:
status = runConverter<int8_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT16:
status = runConverter<int16_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT32:
status = runConverter<int32_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT64:
status = runConverter<int64_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_FLOAT32:
status = runConverter<float>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_FLOAT64:
status = runConverter<double>(gdfData, csrReturn, offsets);
break;
default:
RMM_TRY(RMM_FREE(IA, 0));
RMM_TRY(RMM_FREE(JA, 0));
RMM_TRY(RMM_FREE(offsets, 0));
return GDF_UNSUPPORTED_DTYPE;
}
RMM_TRY(RMM_FREE(offsets, 0));
return status;
}
template<typename T>
gdf_error runConverter(gdf_column **gdfData, csr_gdf *csrReturn, gdf_size_type * offsets) {
gdf_size_type numCols = csrReturn->cols;
gdf_size_type numRows = csrReturn->rows;
//-- threads and blocks
int threads = 1024;
if ( numRows < 100 ) {
threads = 64;
} else if (numRows < 256) {
threads = 128;
} else if ( numRows < 512) {
threads = 256;
} else if ( numRows < 1024) {
threads = 512;
}
int blocks = (numRows + threads - 1) / threads;
T * A;
RMM_TRY(RMM_ALLOC((void**)&A, (sizeof(T) * csrReturn->nnz), 0));
CUDA_TRY(cudaMemset(A, 0, (sizeof(T) * csrReturn->nnz)));
// Now start moving the data and creating the CSR
for ( gdf_size_type colId = 0; colId < numCols; colId++ ) {
gdf_column *gdf = gdfData[colId];
cudaCreateCSR<T><<<blocks, threads>>>(gdf->data, gdf->valid, gdf->dtype, colId, A, csrReturn->JA, offsets, numRows);
CUDA_CHECK_LAST();
}
csrReturn->A = A;
return gdf_error::GDF_SUCCESS;
}
/*
 * Move data over into CSR and possibly convert the format
*/
template<typename T>
__global__ void cudaCreateCSR(
void *data, gdf_valid_type *valid, gdf_dtype dtype, int colId,
T *A, int64_t *JA, gdf_size_type *offsets, gdf_size_type numRows)
{
	int tid = threadIdx.x + (blockDim.x * blockIdx.x); // get the thread ID, which is also the row number
if ( tid >= numRows)
return;
int bitmapIdx = whichBitmapCSR(tid); // which bitmap
int bitIdx = whichBitCSR(tid); // which bit - over an 8-bit index
gdf_valid_type bitmap = valid[bitmapIdx];
if ( checkBitCSR( bitmap, bitIdx) ) {
gdf_size_type offsetIdx = offsets[tid]; // where should this thread start writing data
A[offsetIdx] = convertDataElement<T>(data, tid, dtype);
JA[offsetIdx] = colId;
++offsets[tid];
}
}
/*
 * Compute the number of valid entries per row - a row spans multiple gdf_columns -
* There is one thread running per row, so just compute the sum for this row.
*
 * the number of elements in a valid array is actually ceil(numRows / 8) since it is a bitmap; the total number of bits checked is equal to numRows
*
*/
__global__ void determineValidRecCount(gdf_valid_type *valid, gdf_size_type numRows, gdf_size_type numCol, gdf_size_type * offset) {
	int tid = threadIdx.x + (blockDim.x * blockIdx.x); // get the thread ID, which is also the row number
if ( tid >= numRows)
return;
int bitmapIdx = whichBitmapCSR(tid); // want the floor of the divide
int bitIdx = whichBitCSR(tid); // which bit - over an 8-bit index
gdf_valid_type bitmap = valid[bitmapIdx];
if (checkBitCSR( bitmap, bitIdx) )
++offset[tid];
}
/**
* Convert the data element into a common format
*/
template<typename T>
__device__ T convertDataElement(void *data, int tid, gdf_dtype dtype) {
T answer;
switch(dtype) {
case gdf_dtype::GDF_INT8: {
int8_t *a = (int8_t *)data;
answer = (T)(a[tid]);
break;
}
case gdf_dtype::GDF_INT16: {
int16_t *b = (int16_t *)data;
answer = (T)(b[tid]);
break;
}
case gdf_dtype::GDF_INT32: {
int32_t *c = (int32_t *)data;
answer = (T)(c[tid]);
break;
}
case gdf_dtype::GDF_INT64: {
int64_t *d = (int64_t *)data;
answer = (T)(d[tid]);
break;
}
case gdf_dtype::GDF_FLOAT32: {
float *e = (float *)data;
answer = (T)(e[tid]);
break;
}
case gdf_dtype::GDF_FLOAT64: {
double *f = (double *)data;
answer = (T)(f[tid]);
break;
}
}
return answer;
}
|
9656cdb2f4d9aa2aacfb227e4e7738164e18c184.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <fstream>
#include <string>
#include <cmath>
#include <climits>
#include <iostream>
#include <chrono>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
using namespace std;
#define MAX_THREAD_PER_BLOCK 512
#define DEBUG_PRINT
/*
PARALLEL NN - VERSION 2
*/
typedef unsigned short int usint;
const usint num_dimensions = 16;
const usint numPointsTest = 1000;
const usint numPointsTrain = 19000;
const usint streamCount = 4;
struct Coordinates {
usint points[num_dimensions];
};
__device__ float getDistance(const Coordinates & coord1, const Coordinates & coord2) {
float square_sum = 0;
for (int i = 0; i < num_dimensions; i++) {
const int c1 = coord1.points[i];
const int c2 = coord2.points[i];
square_sum += (c1 - c2)*(c1 - c2);
}
return sqrt(square_sum);
}
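// i.e. the plain Euclidean distance over the 16 feature dimensions:
//   d(a, b) = sqrt( sum_{i=0..15} (a_i - b_i)^2 )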
__global__ void nearestNeighbor(Coordinates * trainCoords, Coordinates * testCoords, const usint sizeTest, const usint sizeTrain, usint * nearestNeighbors) {
const usint threadId = blockIdx.x*blockDim.x + threadIdx.x;
if (threadId < sizeTest) { // DEBUG
usint nearestNeighbor = 0;
usint nearestDistance = USHRT_MAX;
for (int trainCoordInd = 0; trainCoordInd < sizeTrain; trainCoordInd++) {
float currentDistance = getDistance(trainCoords[trainCoordInd], testCoords[threadId]);
if (currentDistance < nearestDistance) {
nearestNeighbor = trainCoordInd;
nearestDistance = currentDistance;
}
}
nearestNeighbors[threadId] = nearestNeighbor;
}
}
bool checkError(const hipError_t & error, const char * msg = "") {
if (error != hipSuccess) {
printf("CUDA ERROR: %s\n", msg);
cout << error << endl;
exit(1);
}
return true;
}
int main() {
// 1 - INITIALIZE READ STREAMS
const char * testFile = "test.txt";
const char * trainFile = "train.txt";
FILE * test_is = fopen(testFile, "r"), * train_is = fopen(trainFile, "r");
if (!test_is) {
cerr << "Cannot open " << testFile << endl;
exit(1);
}
if (!train_is) {
cerr << "Cannot open " << trainFile << endl;
exit(1);
}
hipSetDevice(0); // initialize CUDA context
cout << "\t--------------------\n";
chrono::high_resolution_clock::time_point begin = chrono::high_resolution_clock::now(), temp, end;
// 2 - SET EXECUTION PARAMETERS
hipStream_t streams[streamCount]; // create four CUDA streams
hipError_t hipError_t;
usint numThreadsPerBlock = numPointsTest;
usint numBlocks = 1;
if (numPointsTest > MAX_THREAD_PER_BLOCK) {
numBlocks = ::ceil(static_cast<double>(numPointsTest) / MAX_THREAD_PER_BLOCK);
numThreadsPerBlock = MAX_THREAD_PER_BLOCK;
}
numThreadsPerBlock /= streamCount;
cout << "Kernels will be called with " << numBlocks << " blocks with " << numThreadsPerBlock << " threads each\n";
// 3 - READ TRAIN COORDINATES FROM FILE STREAMS
// device pointers
Coordinates * d_testCoordinates[streamCount], *d_trainCoordinates;
usint * d_nearestNeighbors[streamCount];
// host pointers
Coordinates * h_testCoordinates[streamCount], *h_trainCoordinates;
usint * h_nearestNeighbors[streamCount];
hipError_t = hipHostMalloc((void**)&h_trainCoordinates, numPointsTrain * sizeof(Coordinates));
checkError(hipError_t, "cudamallochost - h_trainCoordinates");
// read train points to host
for (int i = 0; i < numPointsTrain; i++) {
fscanf(train_is, "%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d\n", &h_trainCoordinates[i].points[0], &h_trainCoordinates[i].points[1], &h_trainCoordinates[i].points[2],
&h_trainCoordinates[i].points[3], &h_trainCoordinates[i].points[4], &h_trainCoordinates[i].points[5], &h_trainCoordinates[i].points[6], &h_trainCoordinates[i].points[7],
&h_trainCoordinates[i].points[8], &h_trainCoordinates[i].points[9], &h_trainCoordinates[i].points[10], &h_trainCoordinates[i].points[11], &h_trainCoordinates[i].points[12],
&h_trainCoordinates[i].points[13], &h_trainCoordinates[i].points[14], &h_trainCoordinates[i].points[15]);
}
cout << "done reading training coordinates to host pinned memory" << endl;
// copy train coordinates to device
hipError_t = hipMalloc((void**)&d_trainCoordinates, numPointsTrain * sizeof(Coordinates));
checkError(hipError_t, "hipMalloc - d_trainCoordinates");
hipError_t = hipMemcpy(d_trainCoordinates, h_trainCoordinates, numPointsTrain * sizeof(Coordinates), hipMemcpyHostToDevice);
checkError(hipError_t, "hipMemcpyAsync - d_trainCoordinates");
chrono::high_resolution_clock::time_point kernel_start = chrono::high_resolution_clock::now();
for (usint stream = 0; stream < streamCount; stream++) {
// 1 - create stream
hipStreamCreate(&streams[stream]);
// 2 - Host memory - allocate memory on host for results and test coordinates
hipError_t = hipHostMalloc((void**)&h_nearestNeighbors[stream], (numPointsTest / streamCount) * sizeof(usint));
checkError(hipError_t, "cudamallochost - h_nearestneighbors");
hipError_t = hipHostMalloc((void**)&h_testCoordinates[stream], (numPointsTest / streamCount) * sizeof(Coordinates));
checkError(hipError_t, "cudamallochost - h_testCoordinates");
// 3 - Host memory - read test points
for (int i = 0; i < numPointsTest / streamCount; i++) {
fscanf(test_is, "%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", &h_testCoordinates[stream][i].points[0], &h_testCoordinates[stream][i].points[1], &h_testCoordinates[stream][i].points[2],
&h_testCoordinates[stream][i].points[3], &h_testCoordinates[stream][i].points[4], &h_testCoordinates[stream][i].points[5], &h_testCoordinates[stream][i].points[6], &h_testCoordinates[stream][i].points[7],
&h_testCoordinates[stream][i].points[8], &h_testCoordinates[stream][i].points[9], &h_testCoordinates[stream][i].points[10], &h_testCoordinates[stream][i].points[11], &h_testCoordinates[stream][i].points[12],
&h_testCoordinates[stream][i].points[13], &h_testCoordinates[stream][i].points[14], &h_testCoordinates[stream][i].points[15]);
}
		// 4 - Device memory - allocate space for test coordinates and a result array for this stream to write its results to
hipError_t = hipMalloc((void**)&d_testCoordinates[stream], (numPointsTest / streamCount) * sizeof(Coordinates));
checkError(hipError_t, "hipMalloc - d_testCoordiantes");
hipError_t = hipMalloc((void**)&d_nearestNeighbors[stream], (numPointsTest / streamCount) * sizeof(usint));
checkError(hipError_t, "hipMalloc - d_nearestNeighbors");
// 5 - copy test coordinates to device in async
temp = chrono::high_resolution_clock::now();
hipError_t = hipMemcpyAsync(d_testCoordinates[stream], h_testCoordinates[stream], (numPointsTest / streamCount) * sizeof(Coordinates), hipMemcpyHostToDevice, streams[stream]);
checkError(hipError_t, "hipMemcpy - d_testCoordinates");
end = chrono::high_resolution_clock::now();
cout << "data copied to device memory [" << chrono::duration_cast<chrono::milliseconds>(end - temp).count() << " ms]\n"
<< "executing kernel with " << numBlocks << " blocks with " << numThreadsPerBlock << " threads each" << endl;
		// 6 - Invoke kernel for current stream
usint *& currentResultArray = d_nearestNeighbors[stream];
hipLaunchKernelGGL(( nearestNeighbor), dim3(numBlocks), dim3(numThreadsPerBlock), 0, streams[stream] , d_trainCoordinates, d_testCoordinates[stream], numPointsTest / streamCount, numPointsTrain, currentResultArray);
hipError_t = hipMemcpyAsync(h_nearestNeighbors[stream], d_nearestNeighbors[stream], (numPointsTest / streamCount) * sizeof(usint), hipMemcpyDeviceToHost, streams[stream]);
checkError(hipError_t, "hipMemcpy - h_nearestNeighbors");
}
// Wait for GPU to terminate and fetch results
hipError_t = hipGetLastError();
checkError(hipError_t, "before deviceSync() error!");
hipDeviceSynchronize();
end = chrono::high_resolution_clock::now();
cout << "Computation + read test data: " << chrono::duration_cast<chrono::milliseconds>(end - kernel_start).count() << " ms\n";
cout << "\t--------------------\n";
end = chrono::high_resolution_clock::now();
ofstream os("output.txt");
for (int stream = 0; stream < streamCount; stream++) {
for (int i = 0; i < numPointsTest / streamCount; i++) {
os << h_nearestNeighbors[stream][i] << endl;
}
}
end = chrono::high_resolution_clock::now();
cout << "\t--------------------\nTotal time: " << chrono::duration_cast<chrono::milliseconds>(end - begin).count() << " ms\nterminating\n";
return 0;
}
| 9656cdb2f4d9aa2aacfb227e4e7738164e18c184.cu | #include <cuda.h>
#include <stdlib.h>
#include <fstream>
#include <string>
#include <cmath>
#include <climits>
#include <iostream>
#include <chrono>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
using namespace std;
#define MAX_THREAD_PER_BLOCK 512
#define DEBUG_PRINT
/*
PARALLEL NN - VERSION 2
*/
typedef unsigned short int usint;
const usint num_dimensions = 16;
const usint numPointsTest = 1000;
const usint numPointsTrain = 19000;
const usint streamCount = 4;
struct Coordinates {
usint points[num_dimensions];
};
__device__ float getDistance(const Coordinates & coord1, const Coordinates & coord2) {
float square_sum = 0;
for (int i = 0; i < num_dimensions; i++) {
const int c1 = coord1.points[i];
const int c2 = coord2.points[i];
square_sum += (c1 - c2)*(c1 - c2);
}
return sqrt(square_sum);
}
__global__ void nearestNeighbor(Coordinates * trainCoords, Coordinates * testCoords, const usint sizeTest, const usint sizeTrain, usint * nearestNeighbors) {
const usint threadId = blockIdx.x*blockDim.x + threadIdx.x;
if (threadId < sizeTest) { // DEBUG
usint nearestNeighbor = 0;
usint nearestDistance = USHRT_MAX;
for (int trainCoordInd = 0; trainCoordInd < sizeTrain; trainCoordInd++) {
float currentDistance = getDistance(trainCoords[trainCoordInd], testCoords[threadId]);
if (currentDistance < nearestDistance) {
nearestNeighbor = trainCoordInd;
nearestDistance = currentDistance;
}
}
nearestNeighbors[threadId] = nearestNeighbor;
}
}
bool checkError(const cudaError_t & error, const char * msg = "") {
if (error != cudaSuccess) {
printf("CUDA ERROR: %s\n", msg);
cout << error << endl;
exit(1);
}
return true;
}
int main() {
// 1 - INITIALIZE READ STREAMS
const char * testFile = "test.txt";
const char * trainFile = "train.txt";
FILE * test_is = fopen(testFile, "r"), * train_is = fopen(trainFile, "r");
if (!test_is) {
cerr << "Cannot open " << testFile << endl;
exit(1);
}
if (!train_is) {
cerr << "Cannot open " << trainFile << endl;
exit(1);
}
cudaSetDevice(0); // initialize CUDA context
cout << "\t--------------------\n";
chrono::high_resolution_clock::time_point begin = chrono::high_resolution_clock::now(), temp, end;
// 2 - SET EXECUTION PARAMETERS
cudaStream_t streams[streamCount]; // create four CUDA streams
cudaError_t cudaError;
usint numThreadsPerBlock = numPointsTest;
usint numBlocks = 1;
if (numPointsTest > MAX_THREAD_PER_BLOCK) {
numBlocks = std::ceil(static_cast<double>(numPointsTest) / MAX_THREAD_PER_BLOCK);
numThreadsPerBlock = MAX_THREAD_PER_BLOCK;
}
numThreadsPerBlock /= streamCount;
cout << "Kernels will be called with " << numBlocks << " blocks with " << numThreadsPerBlock << " threads each\n";
// 3 - READ TRAIN COORDINATES FROM FILE STREAMS
// device pointers
Coordinates * d_testCoordinates[streamCount], *d_trainCoordinates;
usint * d_nearestNeighbors[streamCount];
// host pointers
Coordinates * h_testCoordinates[streamCount], *h_trainCoordinates;
usint * h_nearestNeighbors[streamCount];
cudaError = cudaMallocHost((void**)&h_trainCoordinates, numPointsTrain * sizeof(Coordinates));
checkError(cudaError, "cudamallochost - h_trainCoordinates");
// read train points to host
for (int i = 0; i < numPointsTrain; i++) {
fscanf(train_is, "%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d\n", &h_trainCoordinates[i].points[0], &h_trainCoordinates[i].points[1], &h_trainCoordinates[i].points[2],
&h_trainCoordinates[i].points[3], &h_trainCoordinates[i].points[4], &h_trainCoordinates[i].points[5], &h_trainCoordinates[i].points[6], &h_trainCoordinates[i].points[7],
&h_trainCoordinates[i].points[8], &h_trainCoordinates[i].points[9], &h_trainCoordinates[i].points[10], &h_trainCoordinates[i].points[11], &h_trainCoordinates[i].points[12],
&h_trainCoordinates[i].points[13], &h_trainCoordinates[i].points[14], &h_trainCoordinates[i].points[15]);
}
cout << "done reading training coordinates to host pinned memory" << endl;
// copy train coordinates to device
cudaError = cudaMalloc((void**)&d_trainCoordinates, numPointsTrain * sizeof(Coordinates));
checkError(cudaError, "cudaMalloc - d_trainCoordinates");
cudaError = cudaMemcpy(d_trainCoordinates, h_trainCoordinates, numPointsTrain * sizeof(Coordinates), cudaMemcpyHostToDevice);
checkError(cudaError, "cudaMemcpyAsync - d_trainCoordinates");
chrono::high_resolution_clock::time_point kernel_start = chrono::high_resolution_clock::now();
for (usint stream = 0; stream < streamCount; stream++) {
// 1 - create stream
cudaStreamCreate(&streams[stream]);
// 2 - Host memory - allocate memory on host for results and test coordinates
cudaError = cudaMallocHost((void**)&h_nearestNeighbors[stream], (numPointsTest / streamCount) * sizeof(usint));
checkError(cudaError, "cudamallochost - h_nearestneighbors");
cudaError = cudaMallocHost((void**)&h_testCoordinates[stream], (numPointsTest / streamCount) * sizeof(Coordinates));
checkError(cudaError, "cudamallochost - h_testCoordinates");
// 3 - Host memory - read test points
for (int i = 0; i < numPointsTest / streamCount; i++) {
fscanf(test_is, "%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", &h_testCoordinates[stream][i].points[0], &h_testCoordinates[stream][i].points[1], &h_testCoordinates[stream][i].points[2],
&h_testCoordinates[stream][i].points[3], &h_testCoordinates[stream][i].points[4], &h_testCoordinates[stream][i].points[5], &h_testCoordinates[stream][i].points[6], &h_testCoordinates[stream][i].points[7],
&h_testCoordinates[stream][i].points[8], &h_testCoordinates[stream][i].points[9], &h_testCoordinates[stream][i].points[10], &h_testCoordinates[stream][i].points[11], &h_testCoordinates[stream][i].points[12],
&h_testCoordinates[stream][i].points[13], &h_testCoordinates[stream][i].points[14], &h_testCoordinates[stream][i].points[15]);
}
		// 4 - Device memory - allocate space for test coordinates and a result array for this stream to write its results to
cudaError = cudaMalloc((void**)&d_testCoordinates[stream], (numPointsTest / streamCount) * sizeof(Coordinates));
checkError(cudaError, "cudaMalloc - d_testCoordiantes");
cudaError = cudaMalloc((void**)&d_nearestNeighbors[stream], (numPointsTest / streamCount) * sizeof(usint));
checkError(cudaError, "cudaMalloc - d_nearestNeighbors");
// 5 - copy test coordinates to device in async
temp = chrono::high_resolution_clock::now();
cudaError = cudaMemcpyAsync(d_testCoordinates[stream], h_testCoordinates[stream], (numPointsTest / streamCount) * sizeof(Coordinates), cudaMemcpyHostToDevice, streams[stream]);
checkError(cudaError, "cudaMemcpy - d_testCoordinates");
end = chrono::high_resolution_clock::now();
cout << "data copied to device memory [" << chrono::duration_cast<chrono::milliseconds>(end - temp).count() << " ms]\n"
<< "executing kernel with " << numBlocks << " blocks with " << numThreadsPerBlock << " threads each" << endl;
		// 6 - Invoke kernel for current stream
usint *& currentResultArray = d_nearestNeighbors[stream];
nearestNeighbor<<< numBlocks, numThreadsPerBlock, 0, streams[stream] >>>(d_trainCoordinates, d_testCoordinates[stream], numPointsTest / streamCount, numPointsTrain, currentResultArray);
cudaError = cudaMemcpyAsync(h_nearestNeighbors[stream], d_nearestNeighbors[stream], (numPointsTest / streamCount) * sizeof(usint), cudaMemcpyDeviceToHost, streams[stream]);
checkError(cudaError, "cudaMemcpy - h_nearestNeighbors");
}
// Wait for GPU to terminate and fetch results
cudaError = cudaGetLastError();
checkError(cudaError, "before deviceSync() error!");
cudaDeviceSynchronize();
end = chrono::high_resolution_clock::now();
cout << "Computation + read test data: " << chrono::duration_cast<chrono::milliseconds>(end - kernel_start).count() << " ms\n";
cout << "\t--------------------\n";
end = chrono::high_resolution_clock::now();
ofstream os("output.txt");
for (int stream = 0; stream < streamCount; stream++) {
for (int i = 0; i < numPointsTest / streamCount; i++) {
os << h_nearestNeighbors[stream][i] << endl;
}
}
end = chrono::high_resolution_clock::now();
cout << "\t--------------------\nTotal time: " << chrono::duration_cast<chrono::milliseconds>(end - begin).count() << " ms\nterminating\n";
return 0;
}
|
48570493ce1e4f42fd9de18599fee5ee22a4d2c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel_hip.cuh"
__global__ void raiseImageKernel(float *output, float *input, int width, int height, float factor)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = threadIdx.y + blockIdx.y*blockDim.y;
if (i >= width || j >= height) return;
int idx = j*width + i;
output[idx] = factor * input[idx];
}
float DllExport *raiseImage(float *h_input, int width, int height, float factor);
__global__ void pixelSubtractionKernel(float *output, float *input1, float *input2, int width, int height)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = threadIdx.y + blockIdx.y*blockDim.y;
int idx = j*width + i;
if (i < width && j < height)
output[idx] = input1[idx] - input2[idx];
}
float DllExport *pixelSubtraction(float *h_input1, float *h_input2, int width, int height)
{
int elementCount = width*height;
int size = elementCount*sizeof(float);
float *h_output1 = (float*)malloc(size), *h_output2;
float *dev_input1, *dev_input2, *dev_output;
hipMalloc((void**)&dev_input1, size);
hipMalloc((void**)&dev_input2, size);
hipMalloc((void**)&dev_output, size);
hipMemcpy(dev_input1, h_input1, size, hipMemcpyHostToDevice);
hipMemcpy(dev_input2, h_input2, size, hipMemcpyHostToDevice);
dim3 threadsPerBlock(THREADS_PER_BLOCKDIM, THREADS_PER_BLOCKDIM);
dim3 blockCount(((width - 1) / THREADS_PER_BLOCKDIM) + 1, ((height - 1) / THREADS_PER_BLOCKDIM) + 1);
pixelSubtractionKernel << <blockCount, threadsPerBlock >> >(dev_output, dev_input1, dev_input2, width, height);
hipMemcpy(h_output1, dev_output, size, hipMemcpyDeviceToHost);
float factor = 0.0;
for (int i = 0; i < elementCount; i++)
if (factor < h_output1[i])
factor = h_output1[i];
if (factor == 0.0) return h_output1;
factor = 1.0 / factor;
raiseImageKernel << <blockCount, threadsPerBlock >> >(dev_input1, dev_output, width, height, factor);
h_output2 = (float*)malloc(size);
hipMemcpy(h_output2, dev_input1, size, hipMemcpyDeviceToHost);
free(h_output1);
hipFree(dev_input1);
hipFree(dev_input2);
hipFree(dev_output);
return h_output2;
}
__global__ void gaussianBlurKernel(float *output, float *input, int width, int height)
{
const int radius = 2;
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = threadIdx.y + blockIdx.y*blockDim.y;
if (i >= width || j >= height) return;
int idx = j*width + i;
int ix, jy;
float total = 0.0, totalFactor = 0.0, tmpFactor;
int tmp;
for (int x = -radius; x < radius; x++)
{
ix = i + x;
if ((ix >= 0) && (ix < width))
{
for (int y = -radius; y < radius; y++)
{
jy = j + y;
if ((jy >= 0) && (jy < height))
{
tmp = x*x + y*y;
switch (tmp)
{
					case 4: // Appears 8 times
tmpFactor = 5.0;
break;
case 5:
tmpFactor = 4.0;
break;
case 8:
tmpFactor = 2.0;
break;
case 1:
tmpFactor = 12.0;
break;
case 2:
tmpFactor = 9.0;
break;
					default: // Appears 1 time
tmpFactor = 15.0;
break;
}
totalFactor += tmpFactor;
total += tmpFactor*input[ix + jy*width];
}
}
}
}
output[idx] = total / totalFactor;
}
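/*
 * The switch above maps the squared distance to the centre pixel (x*x + y*y) to a weight:
 *   0 -> 15 (default, centre), 1 -> 12, 2 -> 9, 4 -> 5, 5 -> 4, 8 -> 2,
 * an approximately Gaussian falloff; dividing by totalFactor keeps the output normalized.
 */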
float DllExport *gaussianBlur(float *h_input, int width, int height)
{
int elementCount = height*width;
int size = elementCount*sizeof(float);
float *h_output = (float*)malloc(size);
float *dev_input, *dev_output;
hipMalloc((void**)&dev_input, size);
hipMalloc((void**)&dev_output, size);
hipMemcpy(dev_input, h_input, size, hipMemcpyHostToDevice);
dim3 threadsPerBlock(THREADS_PER_BLOCKDIM, THREADS_PER_BLOCKDIM);
dim3 blockCount(((width - 1) / THREADS_PER_BLOCKDIM) + 1, ((height - 1) / THREADS_PER_BLOCKDIM) + 1);
gaussianBlurKernel << < blockCount, threadsPerBlock >> > (dev_output, dev_input, width, height);
hipMemcpy(h_output, dev_output, size, hipMemcpyDeviceToHost);
hipFree(dev_input);
hipFree(dev_output);
return h_output;
}
float DllExport *differenceOfGaussian(float *h_input, int width, int height)
{
return pixelSubtraction(gaussianBlur(h_input, width, height), h_input, width, height);
}
__global__ void underSamplingKernel(float *output, float *input, int newWidth, int newHeight, float sizeFactor)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = threadIdx.y + blockIdx.y*blockDim.y;
int width = newWidth*sizeFactor;
if (i >= newWidth || j >= newHeight) return;
int idxOutput = j*newWidth + i;
int idxInput = (j*width + i)*sizeFactor;
output[idxOutput] = (input[idxInput] + input[idxInput + 1] + input[idxInput + width] + input[idxInput + width +1]) / 4;
}
float DllExport *underSampling(float *h_input, int width, int height, float sizeFactor)
{
if (sizeFactor < 1.0) return NULL;
int newWidth = width / sizeFactor;
int newHeight = height / sizeFactor;
int elementCount = height*width;
int newElementCount = newWidth*newHeight;
int size = elementCount*sizeof(float);
int newSize = newElementCount*sizeof(float);
float *h_output = (float*)malloc(newSize);
float *dev_input, *dev_output;
hipMalloc((void**)&dev_input, size);
hipMalloc((void**)&dev_output, newSize);
hipMemcpy(dev_input, h_input, size, hipMemcpyHostToDevice);
dim3 threadsPerBlock(THREADS_PER_BLOCKDIM, THREADS_PER_BLOCKDIM);
dim3 blockCount(((newWidth - 1) / THREADS_PER_BLOCKDIM) + 1, ((newHeight - 1) / THREADS_PER_BLOCKDIM) + 1);
underSamplingKernel << < blockCount, threadsPerBlock >> > (dev_output, dev_input, newWidth, newHeight, sizeFactor);
hipMemcpy(h_output, dev_output, newSize, hipMemcpyDeviceToHost);
hipFree(dev_input);
hipFree(dev_output);
return h_output;
}
float DllExport *laplacian4Neib(float *h_input, int width, int height);
float DllExport *applyMask(float *h_input, int width, int height, int radius, float *h_mask);
float DllExport *applyThreshold(float *h_input, int width, int height, int threshold);
| 48570493ce1e4f42fd9de18599fee5ee22a4d2c6.cu | #include "kernel.cuh"
__global__ void raiseImageKernel(float *output, float *input, int width, int height, float factor)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = threadIdx.y + blockIdx.y*blockDim.y;
if (i >= width || j >= height) return;
int idx = j*width + i;
output[idx] = factor * input[idx];
}
float DllExport *raiseImage(float *h_input, int width, int height, float factor);
__global__ void pixelSubtractionKernel(float *output, float *input1, float *input2, int width, int height)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = threadIdx.y + blockIdx.y*blockDim.y;
int idx = j*width + i;
if (i < width && j < height)
output[idx] = input1[idx] - input2[idx];
}
float DllExport *pixelSubtraction(float *h_input1, float *h_input2, int width, int height)
{
int elementCount = width*height;
int size = elementCount*sizeof(float);
float *h_output1 = (float*)malloc(size), *h_output2;
float *dev_input1, *dev_input2, *dev_output;
cudaMalloc((void**)&dev_input1, size);
cudaMalloc((void**)&dev_input2, size);
cudaMalloc((void**)&dev_output, size);
cudaMemcpy(dev_input1, h_input1, size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_input2, h_input2, size, cudaMemcpyHostToDevice);
dim3 threadsPerBlock(THREADS_PER_BLOCKDIM, THREADS_PER_BLOCKDIM);
dim3 blockCount(((width - 1) / THREADS_PER_BLOCKDIM) + 1, ((height - 1) / THREADS_PER_BLOCKDIM) + 1);
pixelSubtractionKernel << <blockCount, threadsPerBlock >> >(dev_output, dev_input1, dev_input2, width, height);
cudaMemcpy(h_output1, dev_output, size, cudaMemcpyDeviceToHost);
float factor = 0.0;
for (int i = 0; i < elementCount; i++)
if (factor < h_output1[i])
factor = h_output1[i];
if (factor == 0.0) return h_output1;
factor = 1.0 / factor;
raiseImageKernel << <blockCount, threadsPerBlock >> >(dev_input1, dev_output, width, height, factor);
h_output2 = (float*)malloc(size);
cudaMemcpy(h_output2, dev_input1, size, cudaMemcpyDeviceToHost);
free(h_output1);
cudaFree(dev_input1);
cudaFree(dev_input2);
cudaFree(dev_output);
return h_output2;
}
__global__ void gaussianBlurKernel(float *output, float *input, int width, int height)
{
const int radius = 2;
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = threadIdx.y + blockIdx.y*blockDim.y;
if (i >= width || j >= height) return;
int idx = j*width + i;
int ix, jy;
float total = 0.0, totalFactor = 0.0, tmpFactor;
int tmp;
for (int x = -radius; x < radius; x++)
{
ix = i + x;
if ((ix >= 0) && (ix < width))
{
for (int y = -radius; y < radius; y++)
{
jy = j + y;
if ((jy >= 0) && (jy < height))
{
tmp = x*x + y*y;
switch (tmp)
{
					case 4: // Appears 8 times
tmpFactor = 5.0;
break;
case 5:
tmpFactor = 4.0;
break;
case 8:
tmpFactor = 2.0;
break;
case 1:
tmpFactor = 12.0;
break;
case 2:
tmpFactor = 9.0;
break;
					default: // Appears 1 time
tmpFactor = 15.0;
break;
}
totalFactor += tmpFactor;
total += tmpFactor*input[ix + jy*width];
}
}
}
}
output[idx] = total / totalFactor;
}
float DllExport *gaussianBlur(float *h_input, int width, int height)
{
int elementCount = height*width;
int size = elementCount*sizeof(float);
float *h_output = (float*)malloc(size);
float *dev_input, *dev_output;
cudaMalloc((void**)&dev_input, size);
cudaMalloc((void**)&dev_output, size);
cudaMemcpy(dev_input, h_input, size, cudaMemcpyHostToDevice);
dim3 threadsPerBlock(THREADS_PER_BLOCKDIM, THREADS_PER_BLOCKDIM);
dim3 blockCount(((width - 1) / THREADS_PER_BLOCKDIM) + 1, ((height - 1) / THREADS_PER_BLOCKDIM) + 1);
gaussianBlurKernel << < blockCount, threadsPerBlock >> > (dev_output, dev_input, width, height);
cudaMemcpy(h_output, dev_output, size, cudaMemcpyDeviceToHost);
cudaFree(dev_input);
cudaFree(dev_output);
return h_output;
}
float DllExport *differenceOfGaussian(float *h_input, int width, int height)
{
return pixelSubtraction(gaussianBlur(h_input, width, height), h_input, width, height);
}
__global__ void underSamplingKernel(float *output, float *input, int newWidth, int newHeight, float sizeFactor)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = threadIdx.y + blockIdx.y*blockDim.y;
int width = newWidth*sizeFactor;
if (i >= newWidth || j >= newHeight) return;
int idxOutput = j*newWidth + i;
int idxInput = (j*width + i)*sizeFactor;
output[idxOutput] = (input[idxInput] + input[idxInput + 1] + input[idxInput + width] + input[idxInput + width +1]) / 4;
}
float DllExport *underSampling(float *h_input, int width, int height, float sizeFactor)
{
if (sizeFactor < 1.0) return NULL;
int newWidth = width / sizeFactor;
int newHeight = height / sizeFactor;
int elementCount = height*width;
int newElementCount = newWidth*newHeight;
int size = elementCount*sizeof(float);
int newSize = newElementCount*sizeof(float);
float *h_output = (float*)malloc(newSize);
float *dev_input, *dev_output;
cudaMalloc((void**)&dev_input, size);
cudaMalloc((void**)&dev_output, newSize);
cudaMemcpy(dev_input, h_input, size, cudaMemcpyHostToDevice);
dim3 threadsPerBlock(THREADS_PER_BLOCKDIM, THREADS_PER_BLOCKDIM);
dim3 blockCount(((newWidth - 1) / THREADS_PER_BLOCKDIM) + 1, ((newHeight - 1) / THREADS_PER_BLOCKDIM) + 1);
underSamplingKernel << < blockCount, threadsPerBlock >> > (dev_output, dev_input, newWidth, newHeight, sizeFactor);
cudaMemcpy(h_output, dev_output, newSize, cudaMemcpyDeviceToHost);
cudaFree(dev_input);
cudaFree(dev_output);
return h_output;
}
float DllExport *laplacian4Neib(float *h_input, int width, int height);
float DllExport *applyMask(float *h_input, int width, int height, int radius, float *h_mask);
float DllExport *applyThreshold(float *h_input, int width, int height, int threshold);
|
cc02799a4fd0f527eee0492544181c6a4746df9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <assert.h>
// Max-min normalization of input data with m samples and d features
void min_max_normalize(double *xs, int m, int d)
{
for (int x = 0; x < d; ++x) {
// calculate std for each column
double min = xs[x*d + 0];
double max = xs[x*d + 0];
for (int y = d; y < m*d; ++y) {
double val = xs[x*d + y];
if (val < min) {
min = val;
} else if (val > max) {
max = val;
}
}
for (int y = 0; y < m*d; ++y) {
double val = xs[x*d + y];
xs[x*d + y] = (val - min) / (max-min);
}
}
}
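// The intended rescaling is the usual min-max formula
//   x' = (x - min) / (max - min)
// which maps each feature into the range [0, 1].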
// GPU function for calculating the hypothesis function and individual gradient update for each feature of each sample
__global__ void map(int m, double *xs, double *ys, double *params, double *gradvec, int d){ // m is the no. of samples and d is the number of features in xs(input data)
//double *h;
//hipMalloc (&h, m*sizeof(float));
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index<m){
double accum = params[0];
//double accum = 0.0;
for (int j=0; j<d; j++){
accum += xs[index*(d-1)+j] * params[j+1];
}
double h = 1.0/ (1.0 + exp(-accum));
gradvec[index*d+0] = (h - ys[index]) * 1;
for (int j = 1; j < d; j++){
gradvec[index*d+j] = (h - ys[index]) * xs[index*d+j];
}
}
}
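// For one sample x with label y this evaluates the logistic hypothesis
//   h = sigmoid(params[0] + sum_j params[j+1] * x_j) = 1 / (1 + exp(-z))
// and stores the per-feature cross-entropy gradient contributions
//   grad_0 = (h - y) * 1        (bias term)
//   grad_j = (h - y) * x_j      (j >= 1)
// which reducegrad() later sums over all m samples.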
#define WARPSIZE 32
__device__ inline double atomicAddDouble(double *address, double val) {
unsigned long long int *address_as_ull = (unsigned long long int *)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ inline double __shfl_double(double d, int lane) {
// Split the double number into 2 32b registers.
int lo, hi;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(d));
// Shuffle the two 32b registers.
lo = __shfl(lo, lane);
hi = __shfl(hi, lane);
// Recreate the 64b number.
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d) : "r"(lo), "r"(hi));
return d;
}
__device__ inline double warpReduceSum(double val) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
val += __shfl_double(val, (i + offset) % WARPSIZE);
}
return val;
}
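// Rotation-based warp reduction: with WARPSIZE = 32 the loop uses offsets 16, 8, 4, 2, 1,
// each lane adding the value held by the lane `offset` positions away (mod 32); after the
// five steps every lane in the warp holds the sum of all 32 lanes.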
__device__ inline double4 __shfl_double4(double4 d, int lane) {
// Split the double number into 2 32b registers.
int lox, loy, loz, low, hix, hiy, hiz, hiw;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lox), "=r"(hix) : "d"(d.x));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loy), "=r"(hiy) : "d"(d.y));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loz), "=r"(hiz) : "d"(d.z));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(low), "=r"(hiw) : "d"(d.w));
// Shuffle the two 32b registers.
lox = __shfl(lox, lane);
hix = __shfl(hix, lane);
loy = __shfl(loy, lane);
hiy = __shfl(hiy, lane);
loz = __shfl(loz, lane);
hiz = __shfl(hiz, lane);
low = __shfl(low, lane);
hiw = __shfl(hiw, lane);
// Recreate the 64b number.
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.x) : "r"(lox), "r"(hix));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.y) : "r"(loy), "r"(hiy));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.z) : "r"(loz), "r"(hiz));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.w) : "r"(low), "r"(hiw));
return d;
}
__device__ inline double4 warpReduceVSum(double4 val4) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
double4 shiftedVal4 = __shfl_double4(val4, (i + offset) % WARPSIZE);
val4.x += shiftedVal4.x;
val4.y += shiftedVal4.y;
val4.z += shiftedVal4.z;
val4.w += shiftedVal4.w;
}
return val4;
}
__device__ double* deviceReduceKernelj(double * inArray, double *out, long i, long n, long length) {
double sum = 0;
double *inArrayBody;
int index = blockIdx.x * blockDim.x + threadIdx.x;
for (long idx = index; idx < n; idx += blockDim.x * gridDim.x) {
inArrayBody = &inArray[idx*length];
sum += inArrayBody[i];
}
sum = warpReduceSum(sum);
if ((threadIdx.x & (WARPSIZE -1)) == 0){
atomicAddDouble(out, sum);
}
return out;
}
__device__ void deviceReduceArrayKernelj(double * inArray, double *outputArrayBody, long length, long n) {
long i = 0;
double *inArrayBody;
// unrolled version
while ((length - i) >= 4) {
double4 sum4;
sum4.x = 0; sum4.y = 0; sum4.z = 0; sum4.w = 0;
for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
inArrayBody = &inArray[idx*length];
sum4.x += inArrayBody[i];
sum4.y += inArrayBody[i+1];
sum4.z += inArrayBody[i+2];
sum4.w += inArrayBody[i+3];
}
sum4 = warpReduceVSum(sum4);
if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
double *outx = &outputArrayBody[i];
double *outy = &outputArrayBody[i+1];
double *outz = &outputArrayBody[i+2];
double *outw = &outputArrayBody[i+3];
atomicAddDouble(outx, sum4.x);
atomicAddDouble(outy, sum4.y);
atomicAddDouble(outz, sum4.z);
atomicAddDouble(outw, sum4.w);
}
i += 4;
}
for (; i < length; i++) {
deviceReduceKernelj(inArray, &outputArrayBody[i], i, n, length);
}
}
// Finds the final gradient by summing up the element-wise gradients columnwise
extern "C"
__global__
void reducegrad(double *gradvec, double * sumgradvec, int m, int d) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < m)
deviceReduceArrayKernelj(gradvec, sumgradvec, d, m);
}
//Updates the weights/parameters based on the gradients
//alpha is the learning rate and lambda is the regularization parameter
void updateweight (double *params, double *sumgradvec, int m, int d, float alpha, float lambda){
for (int i=0; i<d; i++){
params[i] = params[i] - alpha * (sumgradvec[i]) - lambda * alpha * params[i];
}
}
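// Standard gradient-descent step with L2 regularization:
//   w_i <- w_i - alpha * grad_i - alpha * lambda * w_i
// where alpha is the learning rate and lambda the regularization strength.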
int main(int argc, char *argv[]){
	//Initialize number of samples, features and iterations
int m, d, num_iterations;
if (argc!=4){
m = 100;
d = 3;
num_iterations = 100;
}
else{
m = atoi(argv[1]);
d = atoi(argv[2]);
num_iterations = atoi(argv[3]);
}
//Allocate host memory variables
size_t size1 = m*d*sizeof(double);
size_t size2 = m*sizeof(double);
size_t size3 = d*sizeof(double);
double *xs;
double *ys;
double *params;
double *sumgradvechost;
double *gradvec1;
xs = (double*)malloc(size1);
ys = (double*)malloc(size2);
params = (double*)malloc(size3);
sumgradvechost = (double*)malloc(size3);
gradvec1 = (double*)malloc(size1);
//Read input data from file
FILE *fp, *fp1;
fp = fopen ("input", "r");
if (!fp){
printf ("Unable to open file!");
return 1;
}
for (int i=0; i<m; i++){
for (int j=0; j<d-1; j++){
fscanf(fp, "%lf", &xs[i*(d-1) + j]);
}
fscanf(fp, "%lf", &ys[i]);
}
fclose(fp);
//Initialize weights
for (int i=0; i<d; i++){
params[i] = 0.0;
}
// Print first 5 rows of input data
for (int i=0; i<10; i+=2) {
printf("%lf %lf => %lf \n", xs[i], xs[i+1], ys[i/2]);
}
	//Max-min normalize input data
min_max_normalize(xs, m, d);
//Print first 5 rows of input data after normalization
for (int i=0; i<10; i+=2) {
printf("%lf %lf => %lf \n", xs[i], xs[i+1], ys[i/2]);
}
//Allocate variables in device memory
double *gpu_params;
double *gpu_xs;
double *gpu_ys;
double *gradvec;
double *sumgradvec;
hipMalloc (&gpu_params, size3);
hipMalloc(&gpu_xs, size1);
hipMalloc(&gpu_ys, size2);
hipMalloc(&gradvec, size1);
hipMalloc(&sumgradvec, size3);
//Copy vectors from host memory to device memory
hipMemcpy(gpu_xs, xs, size1, hipMemcpyHostToDevice);
hipMemcpy(gpu_ys, ys, size2, hipMemcpyHostToDevice);
hipMemcpy(gpu_params, params, size3, hipMemcpyHostToDevice);
	//Initialize number of threads and blocks for calling GPU kernels
int threads_per_block = 512;
int blocks_per_grid = (m + threads_per_block - 1) / threads_per_block;
for (int i=0; i<num_iterations; i++){
//Compute hypothesis function and element-wise gradients
hipLaunchKernelGGL(( map), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, m, gpu_xs, gpu_ys, gpu_params, gradvec, d);
//Copy the element wise gradients from GPU to CPU
hipMemcpy(gradvec1, gradvec, size1, hipMemcpyDeviceToHost);
//Compute sum of all grad vector in GPU
hipLaunchKernelGGL(( reducegrad), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, gradvec, sumgradvec, m, d);
//Copy the summed gradient vector from GPU to CPU
hipMemcpy (sumgradvechost, sumgradvec, sizeof(double)*d, hipMemcpyDeviceToHost);
//Update weights in CPU. The learning rate is 0.001 and the regularization parameter is 10.
updateweight(params, sumgradvechost, m, d, 0.001, 10);
//Print current learned weights
for (int j=0; j<d; j++){
printf("%lf \t", params[j]); }
printf("\n");
// Copy in the updated weights back to GPU
hipMemcpy (gpu_params, params, sizeof(double) * d, hipMemcpyHostToDevice);
}
//Compute the predictions on the training data from the developed model
double predict[m];
for (int index=0; index<m; index++){
predict[index] = params[0];
for (int j=0; j<d; j++){
predict[index] += xs[index*(d-1)+j] * params[j+1];
}
}
//Compute the model error as the fraction of misclassified training samples
double error = 0.0;
for (int i=0; i<m; i++){
int tmp = 0;
if ((1/( 1 + exp(-predict[i]))) >= 0.5) tmp = 1; else tmp = 0;
if (tmp != ys[i])
error ++;
}
error = error / m;
printf("%lf \n", error);
//Dump the prediction output to a file
fp1 = fopen("output", "w");
for (int i=0; i<m; i++){
fprintf(fp1, "%lf \n", 1 / (1 + exp(-predict[i])));
}
}
cc02799a4fd0f527eee0492544181c6a4746df9d.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <curand.h>
#include <curand_kernel.h>
#include <assert.h>
// Max-min normalization of input data with m samples and d features
void min_max_normalize(double *xs, int m, int d)
{
for (int x = 0; x < d; ++x) {
// calculate std for each column
double min = xs[x*d + 0];
double max = xs[x*d + 0];
for (int y = d; y < m*d; ++y) {
double val = xs[x*d + y];
if (val < min) {
min = val;
} else if (val > max) {
max = val;
}
}
for (int y = 0; y < m*d; ++y) {
double val = xs[x*d + y];
xs[x*d + y] = (val - min) / (max-min);
}
}
}
// GPU function for calculating the hypothesis function and individual gradient update for each feature of each sample
__global__ void map(int m, double *xs, double *ys, double *params, double *gradvec, int d){ // m is the no. of samples and d is the number of features in xs(input data)
//double *h;
//cudaMalloc (&h, m*sizeof(float));
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index<m){
double accum = params[0];
//double accum = 0.0;
for (int j=0; j<d; j++){
accum += xs[index*(d-1)+j] * params[j+1];
}
double h = 1.0/ (1.0 + exp(-accum));
gradvec[index*d+0] = (h - ys[index]) * 1;
for (int j = 1; j < d; j++){
gradvec[index*d+j] = (h - ys[index]) * xs[index*d+j];
}
}
}
#define WARPSIZE 32
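// Atomic add for doubles emulated with a compare-and-swap loop, for devices
// without a native double-precision atomicAdd.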
__device__ inline double atomicAddDouble(double *address, double val) {
unsigned long long int *address_as_ull = (unsigned long long int *)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ inline double __shfl_double(double d, int lane) {
// Split the double number into 2 32b registers.
int lo, hi;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(d));
// Shuffle the two 32b registers.
lo = __shfl(lo, lane);
hi = __shfl(hi, lane);
// Recreate the 64b number.
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d) : "r"(lo), "r"(hi));
return d;
}
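// Sums a double across the lanes of a warp using the shuffle-based exchange above.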
__device__ inline double warpReduceSum(double val) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
val += __shfl_double(val, (i + offset) % WARPSIZE);
}
return val;
}
__device__ inline double4 __shfl_double4(double4 d, int lane) {
// Split the double number into 2 32b registers.
int lox, loy, loz, low, hix, hiy, hiz, hiw;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lox), "=r"(hix) : "d"(d.x));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loy), "=r"(hiy) : "d"(d.y));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loz), "=r"(hiz) : "d"(d.z));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(low), "=r"(hiw) : "d"(d.w));
// Shuffle the two 32b registers.
lox = __shfl(lox, lane);
hix = __shfl(hix, lane);
loy = __shfl(loy, lane);
hiy = __shfl(hiy, lane);
loz = __shfl(loz, lane);
hiz = __shfl(hiz, lane);
low = __shfl(low, lane);
hiw = __shfl(hiw, lane);
// Recreate the 64b number.
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.x) : "r"(lox), "r"(hix));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.y) : "r"(loy), "r"(hiy));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.z) : "r"(loz), "r"(hiz));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.w) : "r"(low), "r"(hiw));
return d;
}
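// Warp-wide sum reduction applied independently to the four components of a double4.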
__device__ inline double4 warpReduceVSum(double4 val4) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
double4 shiftedVal4 = __shfl_double4(val4, (i + offset) % WARPSIZE);
val4.x += shiftedVal4.x;
val4.y += shiftedVal4.y;
val4.z += shiftedVal4.z;
val4.w += shiftedVal4.w;
}
return val4;
}
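// Grid-stride sum of column i over the n rows of inArray (row stride = length);
// lane 0 of each warp adds its warp's partial sum to *out with atomicAddDouble.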
__device__ double* deviceReduceKernelj(double * inArray, double *out, long i, long n, long length) {
double sum = 0;
double *inArrayBody;
int index = blockIdx.x * blockDim.x + threadIdx.x;
for (long idx = index; idx < n; idx += blockDim.x * gridDim.x) {
inArrayBody = &inArray[idx*length];
sum += inArrayBody[i];
}
sum = warpReduceSum(sum);
if ((threadIdx.x & (WARPSIZE -1)) == 0){
atomicAddDouble(out, sum);
}
return out;
}
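// Column-wise reduction of the n x length (row-major) array inArray into outputArrayBody:
// four columns at a time are summed as a double4 and warp-reduced, and any remaining
// columns fall back to the scalar deviceReduceKernelj path.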
__device__ void deviceReduceArrayKernelj(double * inArray, double *outputArrayBody, long length, long n) {
long i = 0;
double *inArrayBody;
// unrolled version
while ((length - i) >= 4) {
double4 sum4;
sum4.x = 0; sum4.y = 0; sum4.z = 0; sum4.w = 0;
for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
inArrayBody = &inArray[idx*length];
sum4.x += inArrayBody[i];
sum4.y += inArrayBody[i+1];
sum4.z += inArrayBody[i+2];
sum4.w += inArrayBody[i+3];
}
sum4 = warpReduceVSum(sum4);
if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
double *outx = &outputArrayBody[i];
double *outy = &outputArrayBody[i+1];
double *outz = &outputArrayBody[i+2];
double *outw = &outputArrayBody[i+3];
atomicAddDouble(outx, sum4.x);
atomicAddDouble(outy, sum4.y);
atomicAddDouble(outz, sum4.z);
atomicAddDouble(outw, sum4.w);
}
i += 4;
}
for (; i < length; i++) {
deviceReduceKernelj(inArray, &outputArrayBody[i], i, n, length);
}
}
// Finds the final gradient by summing up the element-wise gradients columnwise
extern "C"
__global__
void reducegrad(double *gradvec, double * sumgradvec, int m, int d) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < m)
deviceReduceArrayKernelj(gradvec, sumgradvec, d, m);
}
//Updates the weights/parameters based on the gradients
//alpha is the learning rate and lambda is the regularization parameter
void updateweight (double *params, double *sumgradvec, int m, int d, float alpha, float lambda){
for (int i=0; i<d; i++){
params[i] = params[i] - alpha * (sumgradvec[i]) - lambda * alpha * params[i];
}
}
int main(int argc, char *argv[]){
//Initialize number of samples, features and iterations
int m, d, num_iterations;
if (argc!=4){
m = 100;
d = 3;
num_iterations = 100;
}
else{
m = atoi(argv[1]);
d = atoi(argv[2]);
num_iterations = atoi(argv[3]);
}
//Allocate host memory variables
size_t size1 = m*d*sizeof(double);
size_t size2 = m*sizeof(double);
size_t size3 = d*sizeof(double);
double *xs;
double *ys;
double *params;
double *sumgradvechost;
double *gradvec1;
xs = (double*)malloc(size1);
ys = (double*)malloc(size2);
params = (double*)malloc(size3);
sumgradvechost = (double*)malloc(size3);
gradvec1 = (double*)malloc(size1);
//Read input data from file
FILE *fp, *fp1;
fp = fopen ("input", "r");
if (!fp){
printf ("Unable to open file!");
return 1;
}
for (int i=0; i<m; i++){
for (int j=0; j<d-1; j++){
fscanf(fp, "%lf", &xs[i*(d-1) + j]);
}
fscanf(fp, "%lf", &ys[i]);
}
fclose(fp);
//Initialize weights
for (int i=0; i<d; i++){
params[i] = 0.0;
}
// Print first 5 rows of input data
for (int i=0; i<10; i+=2) {
printf("%lf %lf => %lf \n", xs[i], xs[i+1], ys[i/2]);
}
//Max-min normalize input data
min_max_normalize(xs, m, d);
//Print first 5 rows of input data after normalization
for (int i=0; i<10; i+=2) {
printf("%lf %lf => %lf \n", xs[i], xs[i+1], ys[i/2]);
}
//Allocate variables in device memory
double *gpu_params;
double *gpu_xs;
double *gpu_ys;
double *gradvec;
double *sumgradvec;
cudaMalloc (&gpu_params, size3);
cudaMalloc(&gpu_xs, size1);
cudaMalloc(&gpu_ys, size2);
cudaMalloc(&gradvec, size1);
cudaMalloc(&sumgradvec, size3);
//Copy vectors from host memory to device memory
cudaMemcpy(gpu_xs, xs, size1, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_ys, ys, size2, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_params, params, size3, cudaMemcpyHostToDevice);
//Initialize number of thread and blocks for calling GPU kernels
int threads_per_block = 512;
int blocks_per_grid = (m + threads_per_block - 1) / threads_per_block;
for (int i=0; i<num_iterations; i++){
//Compute hypothesis function and element-wise gradients
map<<<blocks_per_grid, threads_per_block>>>(m, gpu_xs, gpu_ys, gpu_params, gradvec, d);
//Copy the element wise gradients from GPU to CPU
cudaMemcpy(gradvec1, gradvec, size1, cudaMemcpyDeviceToHost);
//Compute sum of all grad vector in GPU
reducegrad<<<blocks_per_grid, threads_per_block>>>(gradvec, sumgradvec, m, d);
//Copy the summed gradient vector from GPU to CPU
cudaMemcpy (sumgradvechost, sumgradvec, sizeof(double)*d, cudaMemcpyDeviceToHost);
//Update weights in CPU. The learning rate is 0.001 and the regularization parameter is 10.
updateweight(params, sumgradvechost, m, d, 0.001, 10);
//Print current learned weights
for (int j=0; j<d; j++){
printf("%lf \t", params[j]); }
printf("\n");
// Copy in the updated weights back to GPU
cudaMemcpy (gpu_params, params, sizeof(double) * d, cudaMemcpyHostToDevice);
}
//Compute the predictions on the training data from the developed model
double predict[m];
for (int index=0; index<m; index++){
predict[index] = params[0];
for (int j=0; j<d; j++){
predict[index] += xs[index*(d-1)+j] * params[j+1];
}
}
//Compute the model error as the fraction of misclassified training samples
double error = 0.0;
for (int i=0; i<m; i++){
int tmp = 0;
if ((1/( 1 + exp(-predict[i]))) >= 0.5) tmp = 1; else tmp = 0;
if (tmp != ys[i])
error ++;
}
error = error / m;
printf("%lf \n", error);
//Dump the prediction output to a file
fp1 = fopen("output", "w");
for (int i=0; i<m; i++){
fprintf(fp1, "%lf \n", 1 / (1 + exp(-predict[i])));
}
}
|
6606ebc8776b45eab4eefa08e7901044c7f87dee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <cmath>
#include <sys/time.h>
void checkErrors(char *label)
{
// we need to synchronise first to catch errors due to
// asynchronous operations that would otherwise
// potentially go unnoticed
hipError_t err;
err = hipDeviceSynchronize();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
err = hipGetLastError();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
}
double get_time()
{ struct timeval tim;
hipDeviceSynchronize();
gettimeofday(&tim, NULL);
return (double) tim.tv_sec+(tim.tv_usec/1000000.0);
}
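// Note: BSZ (the square thread-block / shared-memory tile size) is not defined in this
// file; it is presumably supplied at compile time, e.g. via -DBSZ=16.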
// GPU kernel
__global__ void copy_array(float *u, float *u_prev, int N)
{
int i = threadIdx.x;
int j = threadIdx.y;
int I = blockIdx.y*BSZ*N + blockIdx.x*BSZ + j*N + i;
if (I>=N*N){return;}
u_prev[I] = u[I];
}
__global__ void update (float *u, float *u_prev, int N, float h, float dt, float alpha)
{
// Setting up indices
int i = threadIdx.x;
int j = threadIdx.y;
int I = blockIdx.y*BSZ*N + blockIdx.x*BSZ + j*N + i;
if (I>=N*N){return;}
__shared__ float u_prev_sh[BSZ][BSZ];
u_prev_sh[i][j] = u_prev[I];
__syncthreads();
bool bound_check = ((I>N) && (I< N*N-1-N) && (I%N!=0) && (I%N!=N-1));
bool block_check = ((i>0) && (i<BSZ-1) && (j>0) && (j<BSZ-1));
// if not on block boundary do
if (block_check)
{ u[I] = u_prev_sh[i][j] + alpha*dt/h/h * (u_prev_sh[i+1][j] + u_prev_sh[i-1][j] + u_prev_sh[i][j+1] + u_prev_sh[i][j-1] - 4*u_prev_sh[i][j]);
}
// if not on boundary
else if (bound_check)
//if (bound_check)
{ u[I] = u_prev[I] + alpha*dt/(h*h) * (u_prev[I+1] + u_prev[I-1] + u_prev[I+N] + u_prev[I-N] - 4*u_prev[I]);
}
// Boundary conditions are automatically imposed
// as we don't touch boundaries
}
int main(int argc, char **argv)
{
// Allocate in CPU
if (argc < 3) {
printf("Invalid number of arguments");
return 1;
}
// Allocate in CPU
int N = atoi(argv[1]);
int BLOCKSIZE = BSZ;
hipSetDevice(0);
float xmin = 0.0f;
float xmax = 3.5f;
float ymin = 0.0f;
//float ymax = 2.0f;
float h = (xmax-xmin)/(N-1);
float dt = 0.00001f;
float alpha = 0.645f;
float time = 0.4f;
int steps = (int) ceil(time/dt);
steps = 100;
int I;
float *x = new float[N*N];
float *y = new float[N*N];
float *u = new float[N*N];
float *u_prev = new float[N*N];
// Generate mesh and initial condition
for (int j=0; j<N; j++)
{ for (int i=0; i<N; i++)
{ I = N*j + i;
x[I] = xmin + h*i;
y[I] = ymin + h*j;
u[I] = 0.0f;
if ( (i==0) || (j==0))
{u[I] = 200.0f;}
}
}
// Allocate in GPU
float *u_d, *u_prev_d;
hipMalloc( (void**) &u_d, N*N*sizeof(float));
hipMalloc( (void**) &u_prev_d, N*N*sizeof(float));
// Copy to GPU
hipMemcpy(u_d, u, N*N*sizeof(float), hipMemcpyHostToDevice);
// Loop
dim3 dimGrid(int((N-0.5)/BLOCKSIZE)+1, int((N-0.5)/BLOCKSIZE)+1);
dim3 dimBlock(BLOCKSIZE, BLOCKSIZE);
double start = get_time();
for (int t=0; t<steps; t++)
{hipLaunchKernelGGL(( copy_array) , dim3(dimGrid), dim3(dimBlock), 0, 0, u_d, u_prev_d, N);
hipLaunchKernelGGL(( update) , dim3(dimGrid), dim3(dimBlock), 0, 0, u_d, u_prev_d, N, h, dt, alpha);
}
double stop = get_time();
checkErrors("update");
double elapsed = stop - start;
// std::cout<<"time = "<<elapsed<<std::endl;
// std::cout << N << "," << BLOCKSIZE << "," << elapsed << std::endl;
// Copy result back to host
hipMemcpy(u, u_d, N*N*sizeof(float), hipMemcpyDeviceToHost);
/*
std::ofstream temperature("temperature_shared.txt");
for (int j=0; j<N; j++)
{ for (int i=0; i<N; i++)
{ I = N*j + i;
// std::cout<<u[I]<<"\t";
temperature<<x[I]<<"\t"<<y[I]<<"\t"<<u[I]<<std::endl;
}
temperature<<"\n";
//std::cout<<std::endl;
}
temperature.close();
*/
// Free device
hipFree(u_d);
hipFree(u_prev_d);
}
| 6606ebc8776b45eab4eefa08e7901044c7f87dee.cu | #include <iostream>
#include <fstream>
#include <cmath>
#include <sys/time.h>
void checkErrors(char *label)
{
// we need to synchronise first to catch errors due to
// asynchronous operations that would otherwise
// potentially go unnoticed
cudaError_t err;
err = cudaThreadSynchronize();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
err = cudaGetLastError();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
}
double get_time()
{ struct timeval tim;
cudaThreadSynchronize();
gettimeofday(&tim, NULL);
return (double) tim.tv_sec+(tim.tv_usec/1000000.0);
}
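// Note: BSZ (the square thread-block / shared-memory tile size) is not defined in this
// file; it is presumably supplied at compile time, e.g. via -DBSZ=16.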
// GPU kernel
__global__ void copy_array(float *u, float *u_prev, int N)
{
int i = threadIdx.x;
int j = threadIdx.y;
int I = blockIdx.y*BSZ*N + blockIdx.x*BSZ + j*N + i;
if (I>=N*N){return;}
u_prev[I] = u[I];
}
__global__ void update (float *u, float *u_prev, int N, float h, float dt, float alpha)
{
// Setting up indices
int i = threadIdx.x;
int j = threadIdx.y;
int I = blockIdx.y*BSZ*N + blockIdx.x*BSZ + j*N + i;
if (I>=N*N){return;}
__shared__ float u_prev_sh[BSZ][BSZ];
u_prev_sh[i][j] = u_prev[I];
__syncthreads();
bool bound_check = ((I>N) && (I< N*N-1-N) && (I%N!=0) && (I%N!=N-1));
bool block_check = ((i>0) && (i<BSZ-1) && (j>0) && (j<BSZ-1));
// if not on block boundary do
if (block_check)
{ u[I] = u_prev_sh[i][j] + alpha*dt/h/h * (u_prev_sh[i+1][j] + u_prev_sh[i-1][j] + u_prev_sh[i][j+1] + u_prev_sh[i][j-1] - 4*u_prev_sh[i][j]);
}
// if not on boundary
else if (bound_check)
//if (bound_check)
{ u[I] = u_prev[I] + alpha*dt/(h*h) * (u_prev[I+1] + u_prev[I-1] + u_prev[I+N] + u_prev[I-N] - 4*u_prev[I]);
}
// Boundary conditions are automatically imposed
// as we don't touch boundaries
}
int main(int argc, char **argv)
{
// Allocate in CPU
if (argc < 3) {
printf("Invalid number of arguments");
return 1;
}
// Allocate in CPU
int N = atoi(argv[1]);
int BLOCKSIZE = BSZ;
cudaSetDevice(0);
float xmin = 0.0f;
float xmax = 3.5f;
float ymin = 0.0f;
//float ymax = 2.0f;
float h = (xmax-xmin)/(N-1);
float dt = 0.00001f;
float alpha = 0.645f;
float time = 0.4f;
int steps = (int) ceil(time/dt);
steps = 100;
int I;
float *x = new float[N*N];
float *y = new float[N*N];
float *u = new float[N*N];
float *u_prev = new float[N*N];
// Generate mesh and intial condition
for (int j=0; j<N; j++)
{ for (int i=0; i<N; i++)
{ I = N*j + i;
x[I] = xmin + h*i;
y[I] = ymin + h*j;
u[I] = 0.0f;
if ( (i==0) || (j==0))
{u[I] = 200.0f;}
}
}
// Allocate in GPU
float *u_d, *u_prev_d;
cudaMalloc( (void**) &u_d, N*N*sizeof(float));
cudaMalloc( (void**) &u_prev_d, N*N*sizeof(float));
// Copy to GPU
cudaMemcpy(u_d, u, N*N*sizeof(float), cudaMemcpyHostToDevice);
// Loop
dim3 dimGrid(int((N-0.5)/BLOCKSIZE)+1, int((N-0.5)/BLOCKSIZE)+1);
dim3 dimBlock(BLOCKSIZE, BLOCKSIZE);
double start = get_time();
for (int t=0; t<steps; t++)
{ copy_array <<<dimGrid, dimBlock>>> (u_d, u_prev_d, N);
update <<<dimGrid, dimBlock>>> (u_d, u_prev_d, N, h, dt, alpha);
}
double stop = get_time();
checkErrors("update");
double elapsed = stop - start;
// std::cout<<"time = "<<elapsed<<std::endl;
// std::cout << N << "," << BLOCKSIZE << "," << elapsed << std::endl;
// Copy result back to host
cudaMemcpy(u, u_d, N*N*sizeof(float), cudaMemcpyDeviceToHost);
/*
std::ofstream temperature("temperature_shared.txt");
for (int j=0; j<N; j++)
{ for (int i=0; i<N; i++)
{ I = N*j + i;
// std::cout<<u[I]<<"\t";
temperature<<x[I]<<"\t"<<y[I]<<"\t"<<u[I]<<std::endl;
}
temperature<<"\n";
//std::cout<<std::endl;
}
temperature.close();
*/
// Free device
cudaFree(u_d);
cudaFree(u_prev_d);
}
|
10194087318c0f6e2ac3a08ccca08f2ff5f3194c.hip | // !!! This is a file automatically generated by hipify!!!
// File: mpi.cu
// C/Fortran interface to MPI.
// includes standard headers
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// includes cuda headers
#include <hip/hip_runtime.h>
// includes project headers
#include "cuda_globals.h"
#ifdef GPUDIRECT
// includes mpi headers
#undef SEEK_SET // remove compilation errors
#undef SEEK_CUR // with C++ binding of MPI
#undef SEEK_END
#include <mpi.h>
#include <algorithm> //namespace std has no member min error otherwise
#endif
/******************************************************/
// CUDA wrappers/kernels for scattering vectors
// to do a blocked MPI_AlltoAll call
/*
__global__ void curedis_ref(hipDoubleComplex *src, hipDoubleComplex *dst,
const int NVECTORS, const int NBLOCKS, const int BLOCK_SIZE)
{
int idx = blockIdx.x*blockDim.x+threadIdx.x;
// for each thread,
if(idx == 0)
{
int stridesrc = NBLOCKS * BLOCK_SIZE;
int stridedst = NVECTORS * BLOCK_SIZE;
for(int i=0;i<NVECTORS;i++)
for(int j=0;j<NBLOCKS;j++)
for(int k=0;k<BLOCK_SIZE;k++)
dst[j*stridedst+i*BLOCK_SIZE+k] = src[i*stridesrc+j*BLOCK_SIZE+k];
}
}
*/
/*
template <class T>
__global__ void cuscatter(T *src, T *dst, const int NVECTORS,
const int NBLOCKS, const int BLOCK_SIZE)
{
int idx = blockIdx.x*blockDim.x+threadIdx.x;
// for each thread,
if(idx < BLOCK_SIZE)
{
int indexsrc = blockIdx.y*NBLOCKS*BLOCK_SIZE + blockIdx.z*BLOCK_SIZE + idx;
int indexdst = blockIdx.z*NVECTORS*BLOCK_SIZE + blockIdx.y*BLOCK_SIZE + idx;
dst[indexdst] = src[indexsrc];
}
}
*/
/*
// scatters vectors for blocked MPI_Alltoall, type double
extern "C"
void cuda_scatterd_(devptr_t *devptr_src, devptr_t *devptr_dst,
const int *NVECTORS, const int *NBLOCKS, const int *BLOCK_SIZE)
{
// grid dimensions
dim3 block(MAX_THREADS);
dim3 grid((*BLOCK_SIZE+block.x-1)/block.x,*NVECTORS,*NBLOCKS);
// device pointers
double *src = (double *)(*devptr_src);
double *dst = (double *)(*devptr_dst);
cuscatter<double><<<grid,block>>>(src,dst,*NVECTORS,*NBLOCKS,*BLOCK_SIZE);
CUDA_ERROR( hipGetLastError(), "Failed to execute CUDA kernel cuscatter!" );
}
*/
/*
// scatters vectors for blocked MPI_Alltoall, type hipDoubleComplex
extern "C"
void cuda_scatterz_(devptr_t *devptr_src, devptr_t *devptr_dst,
const int *NVECTORS, const int *NBLOCKS, const int *BLOCK_SIZE)
{
// grid dimensions
dim3 block(MAX_THREADS);
dim3 grid((*BLOCK_SIZE+block.x-1)/block.x,*NVECTORS,*NBLOCKS);
// device pointers
hipDoubleComplex *src = (hipDoubleComplex *)(*devptr_src);
hipDoubleComplex *dst = (hipDoubleComplex *)(*devptr_dst);
cuscatter<hipDoubleComplex><<<grid,block>>>(src,dst,*NVECTORS,*NBLOCKS,*BLOCK_SIZE);
CUDA_ERROR( hipGetLastError(), "Failed to execute CUDA kernel cuscatter!" );
}
*/
/******************************************************/
// CUDA wrappers/kernels for MPI_Alltoall with CUDA Aware MPI
/*
// MPI_Alltoall
extern "C"
void cuda_mpialltoall_(devptr_t *sendbuf, const int *sendcount, void *sendtype,
devptr_t *recvbuf, const int *recvcount, void *recvtype, void *comm)
{
#ifdef GPUDIRECT
MPI_Alltoall((void *)(*sendbuf),*sendcount,*((MPI_Datatype*)sendtype),
(void *)(*recvbuf),*recvcount,*((MPI_Datatype*)recvtype),
*((MPI_Comm*)comm));
#endif
}
*/
//Code to support GPU-Direct in the VASP MPI methods that perform an
//all-to-all operation. See readme_GPUDirect for more details on how
//to enable/use it.
void d2d(void *src, void *dst, const int size)
{
CUDA_ERROR( hipMemcpy(dst, src, size, hipMemcpyDeviceToDevice),
"Failed to copy from device to device!" );
}
void h2d(void *src, void *dst, const int size)
{
CUDA_ERROR( hipMemcpy(dst, src, size, hipMemcpyHostToDevice),
"Failed to copy from host to device!" );
}
void d2h(void *src, void *dst, const int size)
{
CUDA_ERROR( hipMemcpy(dst, src, size, hipMemcpyDeviceToHost),
"Failed to copy from device to host!" );
}
// Custom MPI_Alltoall implementation in VASP
void cuda_alltoall_C(double **src_, double **dst_, const int *size_,
const int *procId_, const int *nProcs_, const int *MAX_)
{
#ifdef GPUDIRECT
MPI_Status *status = NULL;
MPI_Request *request = NULL;
double *src = *src_;
double *dst = *dst_;
const int size = *size_;
const int procId = *procId_;
const int nProcs = *nProcs_;
const int MAX = *MAX_;
//fprintf(stderr,"src: %p, dst: %p, %d\t%d\t%d\t%d\n",
// src, dst, size,procId, nProcs, MAX);
const int sndCount = size / nProcs;
int err = 0;
int reqIdx = 0;
int nRequests = (sndCount / MAX) + 1; //Number of loops
nRequests *= 2; //Send and receive
nRequests += 16; //2 is enough (local copy), 16 is safety
status = new MPI_Status [nRequests];
request = new MPI_Request[nRequests];
for(int block=0; block < sndCount; block += MAX)
{
const int curSndCount = std::min(MAX, sndCount-block);
const int p = block; //Start address offset
//Startup the Irecvs
for(int id=1; id < nProcs; id++)
{
const int target = (id+procId) % nProcs;
err = MPI_Irecv(&dst[target*sndCount + p],
curSndCount, MPI_DOUBLE,
target, 543, MPI_COMM_WORLD,
&request[reqIdx++]);
if(err != MPI_SUCCESS) fprintf(stderr,"Error in MPI_Irecv \n");
}
//Startup the Isnds
for(int id=1; id < nProcs; id++)
{
const int target = (id+procId) % nProcs;
err = MPI_Isend(&src[target*sndCount + p],
curSndCount, MPI_DOUBLE,
target, 543, MPI_COMM_WORLD,
&request[reqIdx++]);
}
//Do the local copy, reuse MPI functions. Allows universal interface for
//the Fortran side. Use same function if we use GPU pointer or host pointer
//Get the pointer type, if call fails its normal malloc
//otherwise test if its device or host to determine copy method
hipPointerAttribute_t attrSrc;
hipError_t resSrc = hipPointerGetAttributes(&attrSrc, src);
hipPointerAttribute_t attrDst;
hipError_t resDst = hipPointerGetAttributes(&attrDst, dst);
hipGetLastError();
//fprintf(stderr,"All to all test: src: %d dst: %d \n", resSrc, resDst);
if(resSrc != hipSuccess && resDst != hipSuccess)
{
//Use Isend/Irecv since one of the two failed indicating
//one of the two is non-pinned host memory
MPI_Isend(&src[procId*sndCount+p], curSndCount, MPI_DOUBLE,
procId, 123, MPI_COMM_WORLD, &request[reqIdx++]);
MPI_Irecv(&dst[procId*sndCount+p], curSndCount, MPI_DOUBLE,
procId, 123, MPI_COMM_WORLD, &request[reqIdx++]);
}
else
{
//Change type such that below functions work nicely
if(resSrc != hipSuccess) attrSrc.memoryType = hipMemoryTypeHost;
if(resDst != hipSuccess) attrDst.memoryType = hipMemoryTypeHost;
if(attrSrc.memoryType == hipMemoryTypeDevice && attrDst.memoryType == hipMemoryTypeDevice)
{
d2d(&src[procId*sndCount+p], &dst[procId*sndCount+p], curSndCount*sizeof(double));
}
else
{
if(attrSrc.memoryType == hipMemoryTypeDevice && attrDst.memoryType == hipMemoryTypeHost)
{
//Data is on device and needs to go to the host
d2h(&src[procId*sndCount+p], &dst[procId*sndCount+p], curSndCount*sizeof(double));
}
else if(attrSrc.memoryType == hipMemoryTypeHost && attrDst.memoryType == hipMemoryTypeDevice)
{
//Data is on the host (pinned) and needs to go to the device
h2d(&src[procId*sndCount+p], &dst[procId*sndCount+p], curSndCount*sizeof(double));
}
else
{
//Both buffers are on the host
for(int i=0; i < curSndCount; i++)
dst[procId*sndCount+p+i] = src[procId*sndCount+p+i];
}
} //if memType
} //if resSrc && resDst
MPI_Waitall(reqIdx,request, status);
reqIdx = 0;
}
delete[] status;
delete[] request;
#endif
} //end vasp_all_to_all
void cuda_alltoall_host_dev_C(double **src_, double **dst_, const int *size_,
const int *procId_, const int *nProcs_, const int *MAX_)
{
cuda_alltoall_C(src_, dst_, size_, procId_, nProcs_, MAX_);
}
void cuda_alltoall_dev_host_C(double **src_, double **dst_, const int *size_,
const int *procId_, const int *nProcs_, const int *MAX_)
{
cuda_alltoall_C(src_, dst_, size_, procId_, nProcs_, MAX_);
}
/******************************************************/
| 10194087318c0f6e2ac3a08ccca08f2ff5f3194c.cu | // File: mpi.cu
// C/Fortran interface to MPI.
// includes standard headers
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// includes cuda headers
#include <cuda_runtime.h>
// includes project headers
#include "cuda_globals.h"
#ifdef GPUDIRECT
// includes mpi headers
#undef SEEK_SET // remove compilation errors
#undef SEEK_CUR // with C++ binding of MPI
#undef SEEK_END
#include <mpi.h>
#include <algorithm> //namespace std has no member min error otherwise
#endif
/******************************************************/
// CUDA wrappers/kernels for scattering vectors
// to do a blocked MPI_AlltoAll call
/*
__global__ void curedis_ref(cuDoubleComplex *src, cuDoubleComplex *dst,
const int NVECTORS, const int NBLOCKS, const int BLOCK_SIZE)
{
int idx = blockIdx.x*blockDim.x+threadIdx.x;
// for each thread,
if(idx == 0)
{
int stridesrc = NBLOCKS * BLOCK_SIZE;
int stridedst = NVECTORS * BLOCK_SIZE;
for(int i=0;i<NVECTORS;i++)
for(int j=0;j<NBLOCKS;j++)
for(int k=0;k<BLOCK_SIZE;k++)
dst[j*stridedst+i*BLOCK_SIZE+k] = src[i*stridesrc+j*BLOCK_SIZE+k];
}
}
*/
/*
template <class T>
__global__ void cuscatter(T *src, T *dst, const int NVECTORS,
const int NBLOCKS, const int BLOCK_SIZE)
{
int idx = blockIdx.x*blockDim.x+threadIdx.x;
// for each thread,
if(idx < BLOCK_SIZE)
{
int indexsrc = blockIdx.y*NBLOCKS*BLOCK_SIZE + blockIdx.z*BLOCK_SIZE + idx;
int indexdst = blockIdx.z*NVECTORS*BLOCK_SIZE + blockIdx.y*BLOCK_SIZE + idx;
dst[indexdst] = src[indexsrc];
}
}
*/
/*
// scatters vectors for blocked MPI_Alltoall, type double
extern "C"
void cuda_scatterd_(devptr_t *devptr_src, devptr_t *devptr_dst,
const int *NVECTORS, const int *NBLOCKS, const int *BLOCK_SIZE)
{
// grid dimensions
dim3 block(MAX_THREADS);
dim3 grid((*BLOCK_SIZE+block.x-1)/block.x,*NVECTORS,*NBLOCKS);
// device pointers
double *src = (double *)(*devptr_src);
double *dst = (double *)(*devptr_dst);
cuscatter<double><<<grid,block>>>(src,dst,*NVECTORS,*NBLOCKS,*BLOCK_SIZE);
CUDA_ERROR( cudaGetLastError(), "Failed to execute CUDA kernel cuscatter!" );
}
*/
/*
// scatters vectors for blocked MPI_Alltoall, type cuDoubleComplex
extern "C"
void cuda_scatterz_(devptr_t *devptr_src, devptr_t *devptr_dst,
const int *NVECTORS, const int *NBLOCKS, const int *BLOCK_SIZE)
{
// grid dimensions
dim3 block(MAX_THREADS);
dim3 grid((*BLOCK_SIZE+block.x-1)/block.x,*NVECTORS,*NBLOCKS);
// device pointers
cuDoubleComplex *src = (cuDoubleComplex *)(*devptr_src);
cuDoubleComplex *dst = (cuDoubleComplex *)(*devptr_dst);
cuscatter<cuDoubleComplex><<<grid,block>>>(src,dst,*NVECTORS,*NBLOCKS,*BLOCK_SIZE);
CUDA_ERROR( cudaGetLastError(), "Failed to execute CUDA kernel cuscatter!" );
}
*/
/******************************************************/
// CUDA wrappers/kernels for MPI_Alltoall with CUDA Aware MPI
/*
// MPI_Alltoall
extern "C"
void cuda_mpialltoall_(devptr_t *sendbuf, const int *sendcount, void *sendtype,
devptr_t *recvbuf, const int *recvcount, void *recvtype, void *comm)
{
#ifdef GPUDIRECT
MPI_Alltoall((void *)(*sendbuf),*sendcount,*((MPI_Datatype*)sendtype),
(void *)(*recvbuf),*recvcount,*((MPI_Datatype*)recvtype),
*((MPI_Comm*)comm));
#endif
}
*/
//Code to support GPU-Direct in the VASP MPI methods that perform an
//all-to-all operation. See readme_GPUDirect for more details on how
//to enable/use it.
void d2d(void *src, void *dst, const int size)
{
CUDA_ERROR( cudaMemcpy(dst, src, size, cudaMemcpyDeviceToDevice),
"Failed to copy from device to device!" );
}
void h2d(void *src, void *dst, const int size)
{
CUDA_ERROR( cudaMemcpy(dst, src, size, cudaMemcpyHostToDevice),
"Failed to copy from host to device!" );
}
void d2h(void *src, void *dst, const int size)
{
CUDA_ERROR( cudaMemcpy(dst, src, size, cudaMemcpyDeviceToHost),
"Failed to copy from device to host!" );
}
// Custom MPI_Alltoall implementation in VASP
void cuda_alltoall_C(double **src_, double **dst_, const int *size_,
const int *procId_, const int *nProcs_, const int *MAX_)
{
#ifdef GPUDIRECT
MPI_Status *status = NULL;
MPI_Request *request = NULL;
double *src = *src_;
double *dst = *dst_;
const int size = *size_;
const int procId = *procId_;
const int nProcs = *nProcs_;
const int MAX = *MAX_;
//fprintf(stderr,"src: %p, dst: %p, %d\t%d\t%d\t%d\n",
// src, dst, size,procId, nProcs, MAX);
const int sndCount = size / nProcs;
int err = 0;
int reqIdx = 0;
int nRequests = (sndCount / MAX) + 1; //Number of loops
nRequests *= 2; //Send and receive
nRequests += 16; //2 is enough (local copy), 16 is safety
status = new MPI_Status [nRequests];
request = new MPI_Request[nRequests];
for(int block=0; block < sndCount; block += MAX)
{
const int curSndCount = std::min(MAX, sndCount-block);
const int p = block; //Start address offset
//Startup the Irecvs
for(int id=1; id < nProcs; id++)
{
const int target = (id+procId) % nProcs;
err = MPI_Irecv(&dst[target*sndCount + p],
curSndCount, MPI_DOUBLE,
target, 543, MPI_COMM_WORLD,
&request[reqIdx++]);
if(err != MPI_SUCCESS) fprintf(stderr,"Error in MPI_Irecv \n");
}
//Startup the Isnds
for(int id=1; id < nProcs; id++)
{
const int target = (id+procId) % nProcs;
err = MPI_Isend(&src[target*sndCount + p],
curSndCount, MPI_DOUBLE,
target, 543, MPI_COMM_WORLD,
&request[reqIdx++]);
}
//Do the local copy, reuse MPI functions. Allows universal interface for
//the Fortran side. Use same function if we use GPU pointer or host pointer
//Get the pointer type, if call fails its normal malloc
//otherwise test if its device or host to determine copy method
cudaPointerAttributes attrSrc;
cudaError_t resSrc = cudaPointerGetAttributes(&attrSrc, src);
cudaPointerAttributes attrDst;
cudaError_t resDst = cudaPointerGetAttributes(&attrDst, dst);
cudaGetLastError();
//fprintf(stderr,"All to all test: src: %d dst: %d \n", resSrc, resDst);
if(resSrc != cudaSuccess && resDst != cudaSuccess)
{
//Use Isend/Irecv since one of the two failed indicating
//one of the two is non-pinned host memory
MPI_Isend(&src[procId*sndCount+p], curSndCount, MPI_DOUBLE,
procId, 123, MPI_COMM_WORLD, &request[reqIdx++]);
MPI_Irecv(&dst[procId*sndCount+p], curSndCount, MPI_DOUBLE,
procId, 123, MPI_COMM_WORLD, &request[reqIdx++]);
}
else
{
//Change type such that below functions work nicely
if(resSrc != cudaSuccess) attrSrc.memoryType = cudaMemoryTypeHost;
if(resDst != cudaSuccess) attrDst.memoryType = cudaMemoryTypeHost;
if(attrSrc.memoryType == cudaMemoryTypeDevice && attrDst.memoryType == cudaMemoryTypeDevice)
{
d2d(&src[procId*sndCount+p], &dst[procId*sndCount+p], curSndCount*sizeof(double));
}
else
{
if(attrSrc.memoryType == cudaMemoryTypeDevice && attrDst.memoryType == cudaMemoryTypeHost)
{
//Data is on device and needs to go to the host
d2h(&src[procId*sndCount+p], &dst[procId*sndCount+p], curSndCount*sizeof(double));
}
else if(attrSrc.memoryType == cudaMemoryTypeHost && attrDst.memoryType == cudaMemoryTypeDevice)
{
//Data is on the host (pinned) and needs to go to the device
h2d(&src[procId*sndCount+p], &dst[procId*sndCount+p], curSndCount*sizeof(double));
}
else
{
//Both buffers are on the host
for(int i=0; i < curSndCount; i++)
dst[procId*sndCount+p+i] = src[procId*sndCount+p+i];
}
} //if memType
} //if resSrc && resDst
MPI_Waitall(reqIdx,request, status);
reqIdx = 0;
}
delete[] status;
delete[] request;
#endif
} //end vasp_all_to_all
void cuda_alltoall_host_dev_C(double **src_, double **dst_, const int *size_,
const int *procId_, const int *nProcs_, const int *MAX_)
{
cuda_alltoall_C(src_, dst_, size_, procId_, nProcs_, MAX_);
}
void cuda_alltoall_dev_host_C(double **src_, double **dst_, const int *size_,
const int *procId_, const int *nProcs_, const int *MAX_)
{
cuda_alltoall_C(src_, dst_, size_, procId_, nProcs_, MAX_);
}
/******************************************************/
|
42a219b3e81df18b1f6787e024bd69c6e4911e06.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <dlfcn.h>
#include <map>
#include <stdio.h>
#include <string>
using namespace std;
#ifdef __cplusplus
extern "C" {
#endif
#include "openacc_profiling.h"
#include "../timing.h"
static char* wrapper_funcname = 0;
static long wrapper_lineno = 0;
map<string, int> regcounts;
int kernelgen_enable_openacc_regcount(char* funcname, long lineno)
{
wrapper_funcname = funcname;
wrapper_lineno = lineno;
return 0;
}
int kernelgen_disable_openacc_regcount()
{
wrapper_funcname = 0;
return 0;
}
void __real_openacci_call(const char *file_name, int line_number, const char *function_name);
static const char* capture_kernel_launch = NULL;
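// Wrapper around openacci_call (the __real_/__wrap_ naming suggests GNU ld --wrap):
// when profiling is armed for the current source location it either reports a cached
// register count or flags the next kernel launch to capture one.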
void __wrap_openacci_call(const char *file_name, int line_number, const char *function_name)
{
if (__builtin_expect(wrapper_funcname != NULL, 1))
{
if (!strcmp(wrapper_funcname, file_name) &&
(wrapper_lineno == line_number))
{
map<string, int>::iterator it = regcounts.find(function_name);
if (it == regcounts.end())
{
// Capture and output regcount for the next launch.
capture_kernel_launch = function_name;
}
else
{
fprintf(stderr, "%s:%ld regcount = %d\n", wrapper_funcname,
wrapper_lineno, it->second);
}
}
}
__real_openacci_call(file_name, line_number, function_name);
}
bool timing = false;
struct timespec kernel_start, kernel_finish;
typedef hipError_t (*cuLaunchKernel_t)(
hipFunction_t,
unsigned int,
unsigned int,
unsigned int,
unsigned int,
unsigned int,
unsigned int,
unsigned int,
hipStream_t,
void **,
void **);
static cuLaunchKernel_t cuLaunchKernel_ = NULL;
hipError_t hipModuleLaunchKernel(
hipFunction_t f,
unsigned int gridDimX,
unsigned int gridDimY,
unsigned int gridDimZ,
unsigned int blockDimX,
unsigned int blockDimY,
unsigned int blockDimZ,
unsigned int sharedMemBytes,
hipStream_t hStream,
void **kernelParams,
void **extra)
{
if (capture_kernel_launch)
{
int regcount = -1;
hipError_t curesult = hipFuncGetAttribute(&regcount, hipFuncAttributeNumRegs, f);
if (curesult != hipSuccess)
{
fprintf(stderr, "Failed to determine regcount for function %s\n", wrapper_funcname);
exit(-1);
}
regcounts[capture_kernel_launch] = regcount;
fprintf(stderr, "%s:%ld regcount = %d\n", wrapper_funcname,
wrapper_lineno, regcount);
capture_kernel_launch = NULL;
}
// Measure kernel time.
get_time(&kernel_start);
timing = true;
return cuLaunchKernel_(f,
gridDimX, gridDimY, gridDimZ,
blockDimX, blockDimY, blockDimZ,
sharedMemBytes, hStream, kernelParams, extra);
}
typedef hipError_t (*cuCtxSynchronize_t)();
static cuCtxSynchronize_t cuCtxSynchronize_ = NULL;
hipError_t hipCtxSynchronize()
{
hipError_t result = cuCtxSynchronize_();
if (timing)
{
get_time(&kernel_finish);
fprintf(stderr, "%s:%ld time = %f\n", wrapper_funcname,
wrapper_lineno, get_time_diff(&kernel_start, &kernel_finish));
timing = false;
}
return result;
}
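// Intercept dlsym so lookups of the two driver-API entry points wrapped above are
// redirected to the profiling wrappers; every other symbol falls through to __libc_dlsym.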
void* __libc_dlsym(void* handle, const char* symname) __THROW;
void* dlsym(void* handle, const char* symname) __THROW
{
void* addr = __libc_dlsym(handle, symname);
if (!strcmp(symname, "hipModuleLaunchKernel"))
{
cuLaunchKernel_ = (cuLaunchKernel_t)addr;
return (void*)&hipModuleLaunchKernel;
}
if (!strcmp(symname, "hipCtxSynchronize"))
{
cuCtxSynchronize_ = (cuCtxSynchronize_t)addr;
return (void*)&hipCtxSynchronize;
}
return addr;
}
#ifdef __cplusplus
}
#endif
| 42a219b3e81df18b1f6787e024bd69c6e4911e06.cu | #include <cuda.h>
#include <dlfcn.h>
#include <map>
#include <stdio.h>
#include <string>
using namespace std;
#ifdef __cplusplus
extern "C" {
#endif
#include "openacc_profiling.h"
#include "../timing.h"
static char* wrapper_funcname = 0;
static long wrapper_lineno = 0;
map<string, int> regcounts;
int kernelgen_enable_openacc_regcount(char* funcname, long lineno)
{
wrapper_funcname = funcname;
wrapper_lineno = lineno;
return 0;
}
int kernelgen_disable_openacc_regcount()
{
wrapper_funcname = 0;
return 0;
}
void __real_openacci_call(const char *file_name, int line_number, const char *function_name);
static const char* capture_kernel_launch = NULL;
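// Wrapper around openacci_call (the __real_/__wrap_ naming suggests GNU ld --wrap):
// when profiling is armed for the current source location it either reports a cached
// register count or flags the next kernel launch to capture one.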
void __wrap_openacci_call(const char *file_name, int line_number, const char *function_name)
{
if (__builtin_expect(wrapper_funcname != NULL, 1))
{
if (!strcmp(wrapper_funcname, file_name) &&
(wrapper_lineno == line_number))
{
map<string, int>::iterator it = regcounts.find(function_name);
if (it == regcounts.end())
{
// Capture and output regcount for the next launch.
capture_kernel_launch = function_name;
}
else
{
fprintf(stderr, "%s:%ld regcount = %d\n", wrapper_funcname,
wrapper_lineno, it->second);
}
}
}
__real_openacci_call(file_name, line_number, function_name);
}
bool timing = false;
struct timespec kernel_start, kernel_finish;
typedef CUresult (*cuLaunchKernel_t)(
CUfunction,
unsigned int,
unsigned int,
unsigned int,
unsigned int,
unsigned int,
unsigned int,
unsigned int,
CUstream,
void **,
void **);
static cuLaunchKernel_t cuLaunchKernel_ = NULL;
CUresult cuLaunchKernel(
CUfunction f,
unsigned int gridDimX,
unsigned int gridDimY,
unsigned int gridDimZ,
unsigned int blockDimX,
unsigned int blockDimY,
unsigned int blockDimZ,
unsigned int sharedMemBytes,
CUstream hStream,
void **kernelParams,
void **extra)
{
if (capture_kernel_launch)
{
int regcount = -1;
CUresult curesult = cuFuncGetAttribute(&regcount, CU_FUNC_ATTRIBUTE_NUM_REGS, f);
if (curesult != CUDA_SUCCESS)
{
fprintf(stderr, "Failed to determine regcount for function %s\n", wrapper_funcname);
exit(-1);
}
regcounts[capture_kernel_launch] = regcount;
fprintf(stderr, "%s:%ld regcount = %d\n", wrapper_funcname,
wrapper_lineno, regcount);
capture_kernel_launch = NULL;
}
// Measure kernel time.
get_time(&kernel_start);
timing = true;
return cuLaunchKernel_(f,
gridDimX, gridDimY, gridDimZ,
blockDimX, blockDimY, blockDimZ,
sharedMemBytes, hStream, kernelParams, extra);
}
typedef CUresult (*cuCtxSynchronize_t)();
static cuCtxSynchronize_t cuCtxSynchronize_ = NULL;
CUresult cuCtxSynchronize()
{
CUresult result = cuCtxSynchronize_();
if (timing)
{
get_time(&kernel_finish);
fprintf(stderr, "%s:%ld time = %f\n", wrapper_funcname,
wrapper_lineno, get_time_diff(&kernel_start, &kernel_finish));
timing = false;
}
return result;
}
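// Intercept dlsym so lookups of the two driver-API entry points wrapped above are
// redirected to the profiling wrappers; every other symbol falls through to __libc_dlsym.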
void* __libc_dlsym(void* handle, const char* symname) __THROW;
void* dlsym(void* handle, const char* symname) __THROW
{
void* addr = __libc_dlsym(handle, symname);
if (!strcmp(symname, "cuLaunchKernel"))
{
cuLaunchKernel_ = (cuLaunchKernel_t)addr;
return (void*)&cuLaunchKernel;
}
if (!strcmp(symname, "cuCtxSynchronize"))
{
cuCtxSynchronize_ = (cuCtxSynchronize_t)addr;
return (void*)&cuCtxSynchronize;
}
return addr;
}
#ifdef __cplusplus
}
#endif
|
5f6d1773a5fa43343c9af9d2268fb4f414d5ddd9.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2022 The Regents of the University of Michigan.
// Part of HOOMD-blue, released under the BSD 3-Clause License.
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "MolecularForceCompute.cuh"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#include <thrust/binary_search.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/gather.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/scatter.h>
#include <thrust/sort.h>
#pragma GCC diagnostic pop
#include <hipcub/hipcub.hpp>
#include <exception>
#include <string>
#define CHECK_CUDA() \
{ \
hipError_t err = hipDeviceSynchronize(); \
if (err != hipSuccess) \
{ \
throw std::runtime_error("CUDA error in MolecularForceCompute " \
+ std::string(hipGetErrorString(err))); \
} \
err = hipGetLastError(); \
if (err != hipSuccess) \
{ \
throw std::runtime_error("CUDA error " + std::string(hipGetErrorString(err))); \
} \
}
/*! \file MolecularForceCompute.cu
\brief Contains GPU kernel code used by MolecularForceCompute
*/
namespace hoomd
{
namespace md
{
namespace kernel
{
//! Sort local molecules and assign local molecule indices to particles
hipError_t gpu_sort_by_molecule(unsigned int nptl,
const unsigned int* d_tag,
const unsigned int* d_molecule_tag,
unsigned int* d_local_molecule_tags,
unsigned int* d_local_molecules_lowest_idx,
unsigned int* d_local_unique_molecule_tags,
unsigned int* d_local_molecule_idx,
unsigned int* d_sorted_by_tag,
unsigned int* d_idx_sorted_by_tag,
unsigned int* d_idx_sorted_by_molecule_and_tag,
unsigned int* d_lowest_idx,
unsigned int* d_lowest_idx_sort,
unsigned int* d_lowest_idx_in_molecules,
unsigned int* d_lowest_idx_by_molecule_tag,
unsigned int* d_molecule_length,
unsigned int& n_local_molecules,
unsigned int& max_len,
unsigned int& n_local_ptls_in_molecules,
CachedAllocator& alloc,
bool check_cuda)
{
thrust::device_ptr<const unsigned int> tag(d_tag);
thrust::device_ptr<const unsigned int> molecule_tag(d_molecule_tag);
thrust::device_ptr<unsigned int> local_molecule_tags(d_local_molecule_tags);
thrust::device_ptr<unsigned int> local_unique_molecule_tags(d_local_unique_molecule_tags);
thrust::device_ptr<unsigned int> local_molecule_idx(d_local_molecule_idx);
thrust::device_ptr<unsigned int> sorted_by_tag(d_sorted_by_tag);
thrust::device_ptr<unsigned int> idx_sorted_by_tag(d_idx_sorted_by_tag);
thrust::device_ptr<unsigned int> molecule_length(d_molecule_length);
// get temp allocations
unsigned int* d_molecule_length_tmp = alloc.getTemporaryBuffer<unsigned int>(nptl);
unsigned int* d_local_unique_molecule_tags_tmp = alloc.getTemporaryBuffer<unsigned int>(nptl);
// sort local particles by tag
// store ascending index in temp buffer
unsigned int* d_idx = alloc.getTemporaryBuffer<unsigned int>(nptl);
thrust::device_ptr<unsigned int> idx(d_idx);
auto iter = thrust::counting_iterator<unsigned int>(0);
thrust::copy(iter, iter + nptl, idx);
// Determine temporary device storage requirements
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
hipcub::DeviceRadixSort::SortPairs(d_temp_storage,
temp_storage_bytes,
d_tag,
d_sorted_by_tag,
d_idx,
d_idx_sorted_by_tag,
nptl);
d_temp_storage = alloc.allocate(temp_storage_bytes);
// key-value sort
hipcub::DeviceRadixSort::SortPairs(d_temp_storage,
temp_storage_bytes,
d_tag,
d_sorted_by_tag,
d_idx,
d_idx_sorted_by_tag,
nptl);
alloc.deallocate((char*)d_temp_storage);
// release temp buffer
alloc.deallocate((char*)d_idx);
unsigned int* d_num_runs_out = (unsigned int*)alloc.allocate(sizeof(unsigned int));
auto molecule_tag_lookup = thrust::make_permutation_iterator(molecule_tag, tag);
auto molecule_tag_lookup_sorted_by_tag
= thrust::make_permutation_iterator(molecule_tag_lookup, idx_sorted_by_tag);
// get temp buffers
unsigned int* d_molecule_by_idx = alloc.getTemporaryBuffer<unsigned int>(nptl);
thrust::device_ptr<unsigned int> molecule_by_idx(d_molecule_by_idx);
#ifdef __HIP_PLATFORM_HCC__
thrust::copy(thrust::hip::par(alloc),
#else
thrust::copy(thrust::hip::par(alloc),
#endif
molecule_tag_lookup_sorted_by_tag,
molecule_tag_lookup_sorted_by_tag + nptl,
molecule_by_idx);
if (check_cuda)
CHECK_CUDA();
// sort local particle indices by global molecule tag, keeping tag order (radix sort is stable)
d_temp_storage = NULL;
temp_storage_bytes = 0;
hipcub::DeviceRadixSort::SortPairs(d_temp_storage,
temp_storage_bytes,
d_molecule_by_idx,
d_local_molecule_tags,
d_idx_sorted_by_tag,
d_idx_sorted_by_molecule_and_tag,
nptl);
d_temp_storage = alloc.allocate(temp_storage_bytes);
// key-value sort
hipcub::DeviceRadixSort::SortPairs(d_temp_storage,
temp_storage_bytes,
d_molecule_by_idx,
d_local_molecule_tags,
d_idx_sorted_by_tag,
d_idx_sorted_by_molecule_and_tag,
nptl);
alloc.deallocate((char*)d_temp_storage);
// release temp buffer
alloc.deallocate((char*)d_molecule_by_idx);
// find the end of the molecule list
auto end = thrust::lower_bound(local_molecule_tags, local_molecule_tags + nptl, NO_MOLECULE);
if (check_cuda)
CHECK_CUDA();
n_local_ptls_in_molecules = (unsigned int)(end - local_molecule_tags);
// gather unique molecule tags, and reduce their lengths by key
thrust::constant_iterator<unsigned int> one(1);
// determine temporary storage
d_temp_storage = NULL;
temp_storage_bytes = 0;
hipcub::DeviceReduce::ReduceByKey(d_temp_storage,
temp_storage_bytes,
d_local_molecule_tags,
d_local_unique_molecule_tags_tmp,
one,
d_molecule_length_tmp,
d_num_runs_out,
thrust::plus<unsigned int>(),
n_local_ptls_in_molecules);
d_temp_storage = alloc.allocate(temp_storage_bytes);
hipcub::DeviceReduce::ReduceByKey(d_temp_storage,
temp_storage_bytes,
d_local_molecule_tags,
d_local_unique_molecule_tags_tmp,
one,
d_molecule_length_tmp,
d_num_runs_out,
thrust::plus<unsigned int>(),
n_local_ptls_in_molecules);
hipMemcpy(&n_local_molecules, d_num_runs_out, sizeof(unsigned int), hipMemcpyDeviceToHost);
if (check_cuda)
CHECK_CUDA();
alloc.deallocate((char*)d_temp_storage);
alloc.deallocate((char*)d_num_runs_out);
// find the index of the particle with lowest tag in every molecule
thrust::device_ptr<unsigned int> lowest_idx_in_molecules(d_lowest_idx_in_molecules);
thrust::device_ptr<unsigned int> lowest_idx(d_lowest_idx);
thrust::device_ptr<unsigned int> local_unique_molecule_tags_tmp(
d_local_unique_molecule_tags_tmp);
thrust::lower_bound(local_molecule_tags,
local_molecule_tags + n_local_ptls_in_molecules,
local_unique_molecule_tags_tmp,
local_unique_molecule_tags_tmp + n_local_molecules,
lowest_idx_in_molecules);
if (check_cuda)
CHECK_CUDA();
thrust::device_ptr<unsigned int> idx_sorted_by_molecule_and_tag(
d_idx_sorted_by_molecule_and_tag);
thrust::gather(lowest_idx_in_molecules,
lowest_idx_in_molecules + n_local_molecules,
idx_sorted_by_molecule_and_tag,
lowest_idx);
if (check_cuda)
CHECK_CUDA();
// compute maximum molecule length
d_temp_storage = NULL;
temp_storage_bytes = 0;
unsigned int* d_max = (unsigned int*)alloc.allocate(sizeof(unsigned int));
hipcub::DeviceReduce::Max(d_temp_storage,
temp_storage_bytes,
d_molecule_length_tmp,
d_max,
n_local_molecules);
d_temp_storage = alloc.allocate(temp_storage_bytes);
hipcub::DeviceReduce::Max(d_temp_storage,
temp_storage_bytes,
d_molecule_length_tmp,
d_max,
n_local_molecules);
alloc.deallocate((char*)d_temp_storage);
hipMemcpy(&max_len, d_max, sizeof(unsigned int), hipMemcpyDeviceToHost);
alloc.deallocate((char*)d_max);
if (check_cuda)
CHECK_CUDA();
d_temp_storage = NULL;
temp_storage_bytes = 0;
hipcub::DeviceRadixSort::SortPairs(d_temp_storage,
temp_storage_bytes,
d_lowest_idx,
d_lowest_idx_sort,
d_local_unique_molecule_tags_tmp,
d_local_unique_molecule_tags,
n_local_molecules);
d_temp_storage = alloc.allocate(temp_storage_bytes);
// key-value sort
hipcub::DeviceRadixSort::SortPairs(d_temp_storage,
temp_storage_bytes,
d_lowest_idx,
d_lowest_idx_sort,
d_local_unique_molecule_tags_tmp,
d_local_unique_molecule_tags,
n_local_molecules);
alloc.deallocate((char*)d_temp_storage);
d_temp_storage = NULL;
temp_storage_bytes = 0;
hipcub::DeviceRadixSort::SortPairs(d_temp_storage,
temp_storage_bytes,
d_lowest_idx,
d_lowest_idx_sort,
d_molecule_length_tmp,
d_molecule_length,
n_local_molecules);
d_temp_storage = alloc.allocate(temp_storage_bytes);
// key-value sort
hipcub::DeviceRadixSort::SortPairs(d_temp_storage,
temp_storage_bytes,
d_lowest_idx,
d_lowest_idx_sort,
d_molecule_length_tmp,
d_molecule_length,
n_local_molecules);
alloc.deallocate((char*)d_temp_storage);
// release temp buffers
alloc.deallocate((char*)d_molecule_length_tmp);
alloc.deallocate((char*)d_local_unique_molecule_tags_tmp);
// create a global lookup table for lowest idx by molecule tag
thrust::device_ptr<unsigned int> lowest_idx_by_molecule_tag(d_lowest_idx_by_molecule_tag);
thrust::device_ptr<unsigned int> lowest_idx_sort(d_lowest_idx_sort);
thrust::scatter(lowest_idx_sort,
lowest_idx_sort + n_local_molecules,
local_unique_molecule_tags,
lowest_idx_by_molecule_tag);
if (check_cuda)
CHECK_CUDA();
// sort the list of particles in molecules again according to first particle index, keeping
// order in molecule
auto lowest_idx_by_ptl_in_molecule
= thrust::make_permutation_iterator(lowest_idx_by_molecule_tag, local_molecule_tags);
if (check_cuda)
CHECK_CUDA();
// get temp buffer
unsigned int* d_local_molecules_lowest_idx_unsorted
= alloc.getTemporaryBuffer<unsigned int>(n_local_ptls_in_molecules);
thrust::device_ptr<unsigned int> local_molecules_lowest_idx_unsorted(
d_local_molecules_lowest_idx_unsorted);
#ifdef __HIP_PLATFORM_HCC__
thrust::copy(thrust::hip::par(alloc),
#else
thrust::copy(thrust::hip::par(alloc),
#endif
lowest_idx_by_ptl_in_molecule,
lowest_idx_by_ptl_in_molecule + n_local_ptls_in_molecules,
local_molecules_lowest_idx_unsorted);
if (check_cuda)
CHECK_CUDA();
// radix sort is stable
d_temp_storage = NULL;
temp_storage_bytes = 0;
hipcub::DeviceRadixSort::SortPairs(d_temp_storage,
temp_storage_bytes,
d_local_molecules_lowest_idx_unsorted,
d_local_molecules_lowest_idx,
d_idx_sorted_by_molecule_and_tag,
d_idx_sorted_by_tag,
n_local_ptls_in_molecules);
d_temp_storage = alloc.allocate(temp_storage_bytes);
hipcub::DeviceRadixSort::SortPairs(d_temp_storage,
temp_storage_bytes,
d_local_molecules_lowest_idx_unsorted,
d_local_molecules_lowest_idx,
d_idx_sorted_by_molecule_and_tag,
d_idx_sorted_by_tag,
n_local_ptls_in_molecules);
alloc.deallocate((char*)d_temp_storage);
// release temp buffer
alloc.deallocate((char*)d_local_molecules_lowest_idx_unsorted);
// assign local molecule tags to particles
thrust::fill(local_molecule_idx, local_molecule_idx + nptl, NO_MOLECULE);
auto idx_lookup = thrust::make_permutation_iterator(local_molecule_idx, idx_sorted_by_tag);
thrust::device_ptr<unsigned int> local_molecules_lowest_idx(d_local_molecules_lowest_idx);
thrust::lower_bound(lowest_idx_sort,
lowest_idx_sort + n_local_molecules,
local_molecules_lowest_idx,
local_molecules_lowest_idx + n_local_ptls_in_molecules,
idx_lookup);
if (check_cuda)
CHECK_CUDA();
return hipSuccess;
}
__global__ void gpu_fill_molecule_table_kernel(unsigned int nptl,
Index2D molecule_idx,
const unsigned int* d_molecule_idx,
unsigned int* d_molecule_list,
unsigned int* d_molecule_order)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= nptl)
return;
unsigned int molidx = d_molecule_idx[idx];
if (molidx != NO_MOLECULE)
d_molecule_list[molecule_idx(d_molecule_order[idx], molidx)] = idx;
}
hipError_t gpu_fill_molecule_table(unsigned int nptl,
unsigned int n_local_ptls_in_molecules,
Index2D molecule_idx,
const unsigned int* d_molecule_idx,
const unsigned int* d_local_molecule_tags,
const unsigned int* d_idx_sorted_by_tag,
unsigned int* d_molecule_list,
unsigned int* d_molecule_order,
unsigned int block_size,
CachedAllocator& alloc)
{
thrust::device_ptr<unsigned int> molecule_order(d_molecule_order);
thrust::device_ptr<const unsigned int> local_molecule_tags(d_local_molecule_tags);
thrust::device_ptr<const unsigned int> idx_sorted_by_tag(d_idx_sorted_by_tag);
auto idx_lookup = thrust::make_permutation_iterator(molecule_order, idx_sorted_by_tag);
// generate ascending index for every molecule
thrust::constant_iterator<unsigned int> one(1);
#ifdef __HIP_PLATFORM_HCC__
thrust::exclusive_scan_by_key(thrust::hip::par(alloc),
#else
thrust::exclusive_scan_by_key(thrust::hip::par(alloc),
#endif
local_molecule_tags,
local_molecule_tags + n_local_ptls_in_molecules,
one,
idx_lookup);
// write out the table
hipLaunchKernelGGL((gpu_fill_molecule_table_kernel),
dim3(nptl / block_size + 1),
dim3(block_size),
0,
0,
nptl,
molecule_idx,
d_molecule_idx,
d_molecule_list,
d_molecule_order);
return hipSuccess;
}
} // end namespace kernel
} // end namespace md
} // end namespace hoomd
| 5f6d1773a5fa43343c9af9d2268fb4f414d5ddd9.cu | // Copyright (c) 2009-2022 The Regents of the University of Michigan.
// Part of HOOMD-blue, released under the BSD 3-Clause License.
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "MolecularForceCompute.cuh"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#include <thrust/binary_search.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/gather.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/scatter.h>
#include <thrust/sort.h>
#pragma GCC diagnostic pop
#include <hipcub/hipcub.hpp>
#include <exception>
#include <string>
#define CHECK_CUDA() \
{ \
hipError_t err = hipDeviceSynchronize(); \
if (err != hipSuccess) \
{ \
throw std::runtime_error("CUDA error in MolecularForceCompute " \
+ std::string(hipGetErrorString(err))); \
} \
err = hipGetLastError(); \
if (err != hipSuccess) \
{ \
throw std::runtime_error("CUDA error " + std::string(hipGetErrorString(err))); \
} \
}
/*! \file MolecularForceCompute.cu
\brief Contains GPU kernel code used by MolecularForceCompute
*/
namespace hoomd
{
namespace md
{
namespace kernel
{
//! Sort local molecules and assign local molecule indices to particles
hipError_t gpu_sort_by_molecule(unsigned int nptl,
const unsigned int* d_tag,
const unsigned int* d_molecule_tag,
unsigned int* d_local_molecule_tags,
unsigned int* d_local_molecules_lowest_idx,
unsigned int* d_local_unique_molecule_tags,
unsigned int* d_local_molecule_idx,
unsigned int* d_sorted_by_tag,
unsigned int* d_idx_sorted_by_tag,
unsigned int* d_idx_sorted_by_molecule_and_tag,
unsigned int* d_lowest_idx,
unsigned int* d_lowest_idx_sort,
unsigned int* d_lowest_idx_in_molecules,
unsigned int* d_lowest_idx_by_molecule_tag,
unsigned int* d_molecule_length,
unsigned int& n_local_molecules,
unsigned int& max_len,
unsigned int& n_local_ptls_in_molecules,
CachedAllocator& alloc,
bool check_cuda)
{
thrust::device_ptr<const unsigned int> tag(d_tag);
thrust::device_ptr<const unsigned int> molecule_tag(d_molecule_tag);
thrust::device_ptr<unsigned int> local_molecule_tags(d_local_molecule_tags);
thrust::device_ptr<unsigned int> local_unique_molecule_tags(d_local_unique_molecule_tags);
thrust::device_ptr<unsigned int> local_molecule_idx(d_local_molecule_idx);
thrust::device_ptr<unsigned int> sorted_by_tag(d_sorted_by_tag);
thrust::device_ptr<unsigned int> idx_sorted_by_tag(d_idx_sorted_by_tag);
thrust::device_ptr<unsigned int> molecule_length(d_molecule_length);
// get temp allocations
unsigned int* d_molecule_length_tmp = alloc.getTemporaryBuffer<unsigned int>(nptl);
unsigned int* d_local_unique_molecule_tags_tmp = alloc.getTemporaryBuffer<unsigned int>(nptl);
// sort local particles by tag
// store ascending index in temp buffer
unsigned int* d_idx = alloc.getTemporaryBuffer<unsigned int>(nptl);
thrust::device_ptr<unsigned int> idx(d_idx);
auto iter = thrust::counting_iterator<unsigned int>(0);
thrust::copy(iter, iter + nptl, idx);
// Determine temporary device storage requirements
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
hipcub::DeviceRadixSort::SortPairs(d_temp_storage,
temp_storage_bytes,
d_tag,
d_sorted_by_tag,
d_idx,
d_idx_sorted_by_tag,
nptl);
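// The call above, with d_temp_storage == NULL, only writes the required scratch size into
// temp_storage_bytes; the actual sort happens in the second call below.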
d_temp_storage = alloc.allocate(temp_storage_bytes);
// key-value sort
hipcub::DeviceRadixSort::SortPairs(d_temp_storage,
temp_storage_bytes,
d_tag,
d_sorted_by_tag,
d_idx,
d_idx_sorted_by_tag,
nptl);
alloc.deallocate((char*)d_temp_storage);
// release temp buffer
alloc.deallocate((char*)d_idx);
unsigned int* d_num_runs_out = (unsigned int*)alloc.allocate(sizeof(unsigned int));
auto molecule_tag_lookup = thrust::make_permutation_iterator(molecule_tag, tag);
auto molecule_tag_lookup_sorted_by_tag
= thrust::make_permutation_iterator(molecule_tag_lookup, idx_sorted_by_tag);
// get temp buffers
unsigned int* d_molecule_by_idx = alloc.getTemporaryBuffer<unsigned int>(nptl);
thrust::device_ptr<unsigned int> molecule_by_idx(d_molecule_by_idx);
#ifdef __HIP_PLATFORM_HCC__
thrust::copy(thrust::hip::par(alloc),
#else
thrust::copy(thrust::cuda::par(alloc),
#endif
molecule_tag_lookup_sorted_by_tag,
molecule_tag_lookup_sorted_by_tag + nptl,
molecule_by_idx);
if (check_cuda)
CHECK_CUDA();
// sort local particle indices by global molecule tag, keeping tag order (radix sort is stable)
d_temp_storage = NULL;
temp_storage_bytes = 0;
hipcub::DeviceRadixSort::SortPairs(d_temp_storage,
temp_storage_bytes,
d_molecule_by_idx,
d_local_molecule_tags,
d_idx_sorted_by_tag,
d_idx_sorted_by_molecule_and_tag,
nptl);
d_temp_storage = alloc.allocate(temp_storage_bytes);
// key-value sort
hipcub::DeviceRadixSort::SortPairs(d_temp_storage,
temp_storage_bytes,
d_molecule_by_idx,
d_local_molecule_tags,
d_idx_sorted_by_tag,
d_idx_sorted_by_molecule_and_tag,
nptl);
alloc.deallocate((char*)d_temp_storage);
// release temp buffer
alloc.deallocate((char*)d_molecule_by_idx);
// find the end of the molecule list
auto end = thrust::lower_bound(local_molecule_tags, local_molecule_tags + nptl, NO_MOLECULE);
if (check_cuda)
CHECK_CUDA();
n_local_ptls_in_molecules = (unsigned int)(end - local_molecule_tags);
// gather unique molecule tags, and reduce their lengths by key
thrust::constant_iterator<unsigned int> one(1);
// determine temporary storage
d_temp_storage = NULL;
temp_storage_bytes = 0;
hipcub::DeviceReduce::ReduceByKey(d_temp_storage,
temp_storage_bytes,
d_local_molecule_tags,
d_local_unique_molecule_tags_tmp,
one,
d_molecule_length_tmp,
d_num_runs_out,
thrust::plus<unsigned int>(),
n_local_ptls_in_molecules);
d_temp_storage = alloc.allocate(temp_storage_bytes);
hipcub::DeviceReduce::ReduceByKey(d_temp_storage,
temp_storage_bytes,
d_local_molecule_tags,
d_local_unique_molecule_tags_tmp,
one,
d_molecule_length_tmp,
d_num_runs_out,
thrust::plus<unsigned int>(),
n_local_ptls_in_molecules);
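// d_num_runs_out now holds the number of unique molecule tags, i.e. the local molecule count.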
hipMemcpy(&n_local_molecules, d_num_runs_out, sizeof(unsigned int), hipMemcpyDeviceToHost);
if (check_cuda)
CHECK_CUDA();
alloc.deallocate((char*)d_temp_storage);
alloc.deallocate((char*)d_num_runs_out);
// find the index of the particle with lowest tag in every molecule
thrust::device_ptr<unsigned int> lowest_idx_in_molecules(d_lowest_idx_in_molecules);
thrust::device_ptr<unsigned int> lowest_idx(d_lowest_idx);
thrust::device_ptr<unsigned int> local_unique_molecule_tags_tmp(
d_local_unique_molecule_tags_tmp);
thrust::lower_bound(local_molecule_tags,
local_molecule_tags + n_local_ptls_in_molecules,
local_unique_molecule_tags_tmp,
local_unique_molecule_tags_tmp + n_local_molecules,
lowest_idx_in_molecules);
if (check_cuda)
CHECK_CUDA();
thrust::device_ptr<unsigned int> idx_sorted_by_molecule_and_tag(
d_idx_sorted_by_molecule_and_tag);
thrust::gather(lowest_idx_in_molecules,
lowest_idx_in_molecules + n_local_molecules,
idx_sorted_by_molecule_and_tag,
lowest_idx);
if (check_cuda)
CHECK_CUDA();
// compute maximum molecule length
d_temp_storage = NULL;
temp_storage_bytes = 0;
unsigned int* d_max = (unsigned int*)alloc.allocate(sizeof(unsigned int));
hipcub::DeviceReduce::Max(d_temp_storage,
temp_storage_bytes,
d_molecule_length_tmp,
d_max,
n_local_molecules);
d_temp_storage = alloc.allocate(temp_storage_bytes);
hipcub::DeviceReduce::Max(d_temp_storage,
temp_storage_bytes,
d_molecule_length_tmp,
d_max,
n_local_molecules);
alloc.deallocate((char*)d_temp_storage);
hipMemcpy(&max_len, d_max, sizeof(unsigned int), hipMemcpyDeviceToHost);
alloc.deallocate((char*)d_max);
if (check_cuda)
CHECK_CUDA();
d_temp_storage = NULL;
temp_storage_bytes = 0;
hipcub::DeviceRadixSort::SortPairs(d_temp_storage,
temp_storage_bytes,
d_lowest_idx,
d_lowest_idx_sort,
d_local_unique_molecule_tags_tmp,
d_local_unique_molecule_tags,
n_local_molecules);
d_temp_storage = alloc.allocate(temp_storage_bytes);
// key-value sort
hipcub::DeviceRadixSort::SortPairs(d_temp_storage,
temp_storage_bytes,
d_lowest_idx,
d_lowest_idx_sort,
d_local_unique_molecule_tags_tmp,
d_local_unique_molecule_tags,
n_local_molecules);
alloc.deallocate((char*)d_temp_storage);
d_temp_storage = NULL;
temp_storage_bytes = 0;
hipcub::DeviceRadixSort::SortPairs(d_temp_storage,
temp_storage_bytes,
d_lowest_idx,
d_lowest_idx_sort,
d_molecule_length_tmp,
d_molecule_length,
n_local_molecules);
d_temp_storage = alloc.allocate(temp_storage_bytes);
// key-value sort
hipcub::DeviceRadixSort::SortPairs(d_temp_storage,
temp_storage_bytes,
d_lowest_idx,
d_lowest_idx_sort,
d_molecule_length_tmp,
d_molecule_length,
n_local_molecules);
alloc.deallocate((char*)d_temp_storage);
// release temp buffers
alloc.deallocate((char*)d_molecule_length_tmp);
alloc.deallocate((char*)d_local_unique_molecule_tags_tmp);
// create a global lookup table for lowest idx by molecule tag
thrust::device_ptr<unsigned int> lowest_idx_by_molecule_tag(d_lowest_idx_by_molecule_tag);
thrust::device_ptr<unsigned int> lowest_idx_sort(d_lowest_idx_sort);
thrust::scatter(lowest_idx_sort,
lowest_idx_sort + n_local_molecules,
local_unique_molecule_tags,
lowest_idx_by_molecule_tag);
if (check_cuda)
CHECK_CUDA();
// sort the list of particles in molecules again according to first particle index, keeping
// order in molecule
auto lowest_idx_by_ptl_in_molecule
= thrust::make_permutation_iterator(lowest_idx_by_molecule_tag, local_molecule_tags);
if (check_cuda)
CHECK_CUDA();
// get temp buffer
unsigned int* d_local_molecules_lowest_idx_unsorted
= alloc.getTemporaryBuffer<unsigned int>(n_local_ptls_in_molecules);
thrust::device_ptr<unsigned int> local_molecules_lowest_idx_unsorted(
d_local_molecules_lowest_idx_unsorted);
#ifdef __HIP_PLATFORM_HCC__
thrust::copy(thrust::hip::par(alloc),
#else
thrust::copy(thrust::cuda::par(alloc),
#endif
lowest_idx_by_ptl_in_molecule,
lowest_idx_by_ptl_in_molecule + n_local_ptls_in_molecules,
local_molecules_lowest_idx_unsorted);
if (check_cuda)
CHECK_CUDA();
// radix sort is stable
d_temp_storage = NULL;
temp_storage_bytes = 0;
hipcub::DeviceRadixSort::SortPairs(d_temp_storage,
temp_storage_bytes,
d_local_molecules_lowest_idx_unsorted,
d_local_molecules_lowest_idx,
d_idx_sorted_by_molecule_and_tag,
d_idx_sorted_by_tag,
n_local_ptls_in_molecules);
d_temp_storage = alloc.allocate(temp_storage_bytes);
hipcub::DeviceRadixSort::SortPairs(d_temp_storage,
temp_storage_bytes,
d_local_molecules_lowest_idx_unsorted,
d_local_molecules_lowest_idx,
d_idx_sorted_by_molecule_and_tag,
d_idx_sorted_by_tag,
n_local_ptls_in_molecules);
alloc.deallocate((char*)d_temp_storage);
// release temp buffer
alloc.deallocate((char*)d_local_molecules_lowest_idx_unsorted);
// assign local molecule tags to particles
thrust::fill(local_molecule_idx, local_molecule_idx + nptl, NO_MOLECULE);
auto idx_lookup = thrust::make_permutation_iterator(local_molecule_idx, idx_sorted_by_tag);
thrust::device_ptr<unsigned int> local_molecules_lowest_idx(d_local_molecules_lowest_idx);
thrust::lower_bound(lowest_idx_sort,
lowest_idx_sort + n_local_molecules,
local_molecules_lowest_idx,
local_molecules_lowest_idx + n_local_ptls_in_molecules,
idx_lookup);
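// lower_bound against the sorted list of lowest indices converts each particle's molecule key
// into a contiguous local molecule index in [0, n_local_molecules), written through idx_lookup.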
if (check_cuda)
CHECK_CUDA();
return hipSuccess;
}
__global__ void gpu_fill_molecule_table_kernel(unsigned int nptl,
Index2D molecule_idx,
const unsigned int* d_molecule_idx,
unsigned int* d_molecule_list,
unsigned int* d_molecule_order)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= nptl)
return;
unsigned int molidx = d_molecule_idx[idx];
if (molidx != NO_MOLECULE)
d_molecule_list[molecule_idx(d_molecule_order[idx], molidx)] = idx;
}
hipError_t gpu_fill_molecule_table(unsigned int nptl,
unsigned int n_local_ptls_in_molecules,
Index2D molecule_idx,
const unsigned int* d_molecule_idx,
const unsigned int* d_local_molecule_tags,
const unsigned int* d_idx_sorted_by_tag,
unsigned int* d_molecule_list,
unsigned int* d_molecule_order,
unsigned int block_size,
CachedAllocator& alloc)
{
thrust::device_ptr<unsigned int> molecule_order(d_molecule_order);
thrust::device_ptr<const unsigned int> local_molecule_tags(d_local_molecule_tags);
thrust::device_ptr<const unsigned int> idx_sorted_by_tag(d_idx_sorted_by_tag);
auto idx_lookup = thrust::make_permutation_iterator(molecule_order, idx_sorted_by_tag);
// generate ascending index for every molecule
thrust::constant_iterator<unsigned int> one(1);
#ifdef __HIP_PLATFORM_HCC__
thrust::exclusive_scan_by_key(thrust::hip::par(alloc),
#else
thrust::exclusive_scan_by_key(thrust::cuda::par(alloc),
#endif
local_molecule_tags,
local_molecule_tags + n_local_ptls_in_molecules,
one,
idx_lookup);
// write out the table
hipLaunchKernelGGL((gpu_fill_molecule_table_kernel),
dim3(nptl / block_size + 1),
dim3(block_size),
0,
0,
nptl,
molecule_idx,
d_molecule_idx,
d_molecule_list,
d_molecule_order);
return hipSuccess;
}
} // end namespace kernel
} // end namespace md
} // end namespace hoomd
|
7227b5cabf90752c2143c127cc9b4bee7dd96e2d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "MatrixOp.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *arr = NULL;
hipMalloc(&arr, XSIZE*YSIZE*sizeof(int)); // allocate XSIZE*YSIZE ints
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
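// iXSIZE and iYSIZE are rounded up to multiples of the block dimensions so the grid covers the whole matrix.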
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
MatrixOp), dim3(gridBlock),dim3(threadBlock), 0, 0, arr,N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
MatrixOp), dim3(gridBlock),dim3(threadBlock), 0, 0, arr,N);
}
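// The 10 untimed launches above warm up the device; only the 1000 launches below are timed.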
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
MatrixOp), dim3(gridBlock),dim3(threadBlock), 0, 0, arr,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7227b5cabf90752c2143c127cc9b4bee7dd96e2d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "MatrixOp.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *arr = NULL;
cudaMalloc(&arr, XSIZE*YSIZE*sizeof(int)); // allocate XSIZE*YSIZE ints
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
MatrixOp<<<gridBlock,threadBlock>>>(arr,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
MatrixOp<<<gridBlock,threadBlock>>>(arr,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
MatrixOp<<<gridBlock,threadBlock>>>(arr,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
d22c99b415f116b2a93ad9b8c4692844d3299d7d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void addKernel_1(int n, float* x, float* y)
{
for (int i = 0; i < n; ++i)
{
y[i] = x[i] + y[i];
}
}
__global__ void addKernel_256(int n, float* x, float* y)
{
int index = threadIdx.x;
int stride = blockDim.x;
for (int i = index; i < n; i += stride)
{
y[i] = x[i] + y[i];
}
}
__global__ void addKernel_256xN(int n, float* x, float* y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
{
y[i] = x[i] + y[i];
}
}
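// Three variants of the same vector add:
//   addKernel_1     - a single thread loops over all n elements
//   addKernel_256   - one block of 256 threads, each striding by blockDim.x
//   addKernel_256xN - a full grid using the grid-stride loop pattern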
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda_1(int n, float* x, float* y)
{
float* dev_x = 0;
float* dev_y = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_x, n * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_y, n * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_x, x, n * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_y, y, n * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel_1), dim3(1), dim3(1), 0, 0, n, dev_x, dev_y);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(y, dev_y, n * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_x);
hipFree(dev_y);
return cudaStatus;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda_256(int n, float* x, float* y)
{
float* dev_x = 0;
float* dev_y = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_x, n * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_y, n * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_x, x, n * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_y, y, n * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel_256), dim3(1), dim3(256), 0, 0, n, dev_x, dev_y);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(y, dev_y, n * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_x);
hipFree(dev_y);
return cudaStatus;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda_256xN(int n, float* x, float* y)
{
float* dev_x = 0;
float* dev_y = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_x, n * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_y, n * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_x, x, n * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_y, y, n * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
int blockSize = 256;
int numBlocks = (n + blockSize - 1) / blockSize;
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel_256xN), dim3(numBlocks), dim3(blockSize), 0, 0, n, dev_x, dev_y);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(y, dev_y, n * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_x);
hipFree(dev_y);
return cudaStatus;
}
| d22c99b415f116b2a93ad9b8c4692844d3299d7d.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void addKernel_1(int n, float* x, float* y)
{
for (int i = 0; i < n; ++i)
{
y[i] = x[i] + y[i];
}
}
__global__ void addKernel_256(int n, float* x, float* y)
{
int index = threadIdx.x;
int stride = blockDim.x;
for (int i = index; i < n; i += stride)
{
y[i] = x[i] + y[i];
}
}
__global__ void addKernel_256xN(int n, float* x, float* y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
{
y[i] = x[i] + y[i];
}
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda_1(int n, float* x, float* y)
{
float* dev_x = 0;
float* dev_y = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_x, n * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_y, n * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_x, x, n * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_y, y, n * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel_1<<<1, 1>>>(n, dev_x, dev_y);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(y, dev_y, n * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_x);
cudaFree(dev_y);
return cudaStatus;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda_256(int n, float* x, float* y)
{
float* dev_x = 0;
float* dev_y = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_x, n * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_y, n * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_x, x, n * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_y, y, n * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel_256<<<1, 256>>>(n, dev_x, dev_y);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(y, dev_y, n * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_x);
cudaFree(dev_y);
return cudaStatus;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda_256xN(int n, float* x, float* y)
{
float* dev_x = 0;
float* dev_y = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_x, n * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_y, n * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_x, x, n * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_y, y, n * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
int blockSize = 256;
int numBlocks = (n + blockSize - 1) / blockSize;
// Launch a kernel on the GPU with one thread for each element.
addKernel_256xN<<<numBlocks, blockSize>>>(n, dev_x, dev_y);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(y, dev_y, n * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_x);
cudaFree(dev_y);
return cudaStatus;
}
|
741a2bf00f585ee4b37349879717384302b2b1d1.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
using namespace std;
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <cstring>
__global__ void mem_transfer_test(int* input) {
int tid = threadIdx.x;
int offset = blockDim.x * blockIdx.x;
int gid = offset + tid;
printf("threadIdx_X: %d, value : %d, GlobalIdx_X: %d \n", tid, input[gid], gid);
}
int main() {
int size = 128;
int array_byte_size = sizeof(int) * size;
int * h_input;
h_input = (int*)malloc(array_byte_size);
time_t t;
srand((unsigned)time(&t));
// srand(time(NULL));
for (int i = 0; i < size; i++) {
h_input[i] = (int)(rand() & 0xff);
}
//cout << h_input;
int * d_input;
hipMalloc((void**)&d_input, array_byte_size);
hipMemcpy(d_input, h_input, array_byte_size, hipMemcpyHostToDevice);
dim3 block(64);
dim3 grid(2);
mem_transfer_test << <grid, block >> > (d_input);
hipDeviceSynchronize();
hipFree(d_input);
cout << *h_input;  // print the first host element before freeing the buffer
free(h_input);
hipDeviceReset();
return 0;
} | 741a2bf00f585ee4b37349879717384302b2b1d1.cu | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
using namespace std;
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <cstring>
__global__ void mem_transfer_test(int* input) {
int tid = threadIdx.x;
int offset = blockDim.x * blockIdx.x;
int gid = offset + tid;
printf("threadIdx_X: %d, value : %d, GlobalIdx_X: %d \n", tid, input[gid], gid);
}
int main() {
int size = 128;
int array_byte_size = sizeof(int) * size;
int * h_input;
h_input = (int*)malloc(array_byte_size);
time_t t;
srand((unsigned)time(&t));
// srand(time(NULL));
for (int i = 0; i < size; i++) {
h_input[i] = (int)(rand() & 0xff);
}
//cout << h_input;
int * d_input;
cudaMalloc((void**)&d_input, array_byte_size);
cudaMemcpy(d_input, h_input, array_byte_size, cudaMemcpyHostToDevice);
dim3 block(64);
dim3 grid(2);
mem_transfer_test << <grid, block >> > (d_input);
cudaDeviceSynchronize();
cudaFree(d_input);
cout << *h_input;  // print the first host element before freeing the buffer
free(h_input);
cudaDeviceReset();
return 0;
} |
881bedb0e05cc18fc437dc76d6f3dc33fdf28cc9.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2011-2016, Robert Wang, email: robertwgh (at) gmail.com
All rights reserved. https://github.com/robertwgh/cuLDPC
Implementation of LDPC decoding algorithm.
The details of implementation can be found from the following papers:
1. Wang, G., Wu, M., Sun, Y., & Cavallaro, J. R. (2011, June). A massively parallel implementation of QC-LDPC decoder on GPU. In Application Specific Processors (SASP), 2011 IEEE 9th Symposium on (pp. 82-85). IEEE.
2. Wang, G., Wu, M., Yin, B., & Cavallaro, J. R. (2013, December). High throughput low latency LDPC decoding on GPU for SDR systems. In Global Conference on Signal and Information Processing (GlobalSIP), 2013 IEEE (pp. 1258-1261). IEEE.
The current release is close to the GlobalSIP2013 paper.
*/
#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <math.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include "LDPC.h"
#include "matrix.h"
#include "kernel.hip"
float sigma ;
int *info_bin ;
int main()
{
printf("GPU LDPC Decoder\r\nComputing...\r\n");
// For cnp kernel
#if MODE == WIMAX
const char h_element_count1[BLK_ROW] = {6, 7, 7, 6, 6, 7, 6, 6, 7, 6, 6, 6};
const char h_element_count2[BLK_COL] = {3, 3, 6, 3, 3, 6, 3, 6, 3, 6, 3, 6, \
3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2};
#else
const char h_element_count1[BLK_ROW] = {7, 7, 7, 7, 7, 7, 8, 7, 7, 7, 7, 8};
const char h_element_count2[BLK_COL] = {11,4, 3, 3,11, 3, 3, 3,11, 3, 3, 3, \
3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2};
#endif
h_element h_compact1 [H_COMPACT1_COL][H_COMPACT1_ROW]; // for update dt, R
h_element h_element_temp;
// init the compact matrix
for(int i = 0; i < H_COMPACT1_COL; i++)
{
for(int j = 0; j < H_COMPACT1_ROW; j ++)
{
h_element_temp.x = 0;
h_element_temp.y = 0;
h_element_temp.value = -1;
h_element_temp.valid = 0;
h_compact1[i][j] = h_element_temp; // h[i][0-11], the same column
}
}
// scan the h matrix and generate the compact representation of h
for(int i = 0; i < BLK_ROW; i++)
{
int k = 0;
for(int j = 0; j < BLK_COL; j ++)
{
if(h_base[i][j] != -1)
{
h_element_temp.x = i;
h_element_temp.y = j;
h_element_temp.value = h_base[i][j];
h_element_temp.valid = 1;
h_compact1[k][i] = h_element_temp;
k++;
}
}
// printf("row %d, #element=%d\n", i, k);
}
// h_compact2
h_element h_compact2 [H_COMPACT2_ROW][H_COMPACT2_COL]; // for update llr
// init the compact matrix
for(int i = 0; i < H_COMPACT2_ROW; i++)
{
for(int j = 0; j < H_COMPACT2_COL; j ++)
{
h_element_temp.x = 0;
h_element_temp.y = 0;
h_element_temp.value = -1;
h_element_temp.valid = 0;
h_compact2[i][j] = h_element_temp;
}
}
for(int j = 0; j < BLK_COL; j++)
{
int k = 0;
for(int i = 0; i < BLK_ROW; i ++)
{
if(h_base[i][j] != -1)
{
// although h is transposed, the (x,y) is still (iBlkRow, iBlkCol)
h_element_temp.x = i;
h_element_temp.y = j;
h_element_temp.value = h_base[i][j];
h_element_temp.valid = 1;
h_compact2[k][j] = h_element_temp;
k++;
}
}
}
//int memorySize_h_base = BLK_ROW * BLK_COL * sizeof(int);
int memorySize_h_compact1 = H_COMPACT1_ROW * H_COMPACT1_COL * sizeof(h_element);
int memorySize_h_compact2 = H_COMPACT2_ROW * H_COMPACT2_COL * sizeof(h_element);
int memorySize_infobits = INFO_LEN * sizeof(int);
int memorySize_codeword = CODEWORD_LEN * sizeof(int);
int memorySize_llr = CODEWORD_LEN * sizeof(float);
info_bin = (int *) malloc(memorySize_infobits) ;
int *codeword = (int *) malloc(memorySize_codeword) ;
float *trans = (float *) malloc(memorySize_llr) ;
float *recv = (float *) malloc(memorySize_llr) ;
float *llr = (float *) malloc(memorySize_llr) ;
float rate = (float)0.5f;
//////////////////////////////////////////////////////////////////////////////////
// all the variables ending in _gpu are used on the host for the batched GPU computation
int memorySize_infobits_gpu = MCW * CW * memorySize_infobits ;
int memorySize_llr_gpu = MCW * CW * CODEWORD_LEN * sizeof(float);
int memorySize_dt_gpu = MCW * CW * ROW * BLK_COL * sizeof(float);
int memorySize_R_gpu = MCW * CW * ROW * BLK_COL * sizeof(float);
int memorySize_hard_decision_gpu = MCW * CW * CODEWORD_LEN * sizeof(int);
int *info_bin_gpu;
float *llr_gpu;
int * hard_decision_gpu;
info_bin_gpu = (int *) malloc(memorySize_infobits_gpu);
hard_decision_gpu = (int *) malloc(memorySize_hard_decision_gpu);
llr_gpu = (float *) malloc(memorySize_llr_gpu);
error_result this_error;
int total_frame_error = 0;
int total_bit_error = 0;
int total_codeword = 0;
// create device memory
float * dev_llr;
float * dev_dt;
float * dev_R;
int * dev_hard_decision;
h_element * dev_h_compact1;
h_element * dev_h_compact2;
char * dev_h_element_count1;
char * dev_h_element_count2;
hipMalloc((void **)&dev_llr, memorySize_llr_gpu);
hipMalloc((void **)&dev_dt, memorySize_dt_gpu);
hipMalloc((void **)&dev_R, memorySize_R_gpu);
hipMalloc((void **)&dev_hard_decision, memorySize_hard_decision_gpu);
hipMalloc((void **)&dev_h_compact1, memorySize_h_compact1);
hipMalloc((void **)&dev_h_compact2, memorySize_h_compact2);
hipMalloc((void **)&dev_h_element_count1, BLK_ROW);
hipMalloc((void **)&dev_h_element_count2, BLK_COL);
hipMemcpy(dev_h_element_count1, h_element_count1, BLK_ROW, hipMemcpyHostToDevice);
hipMemcpy(dev_h_element_count2, h_element_count2, BLK_COL, hipMemcpyHostToDevice);
hipMemcpy(dev_h_compact1, h_compact1, memorySize_h_compact1, hipMemcpyHostToDevice);
hipMemcpy(dev_h_compact2, h_compact2, memorySize_h_compact2, hipMemcpyHostToDevice);
srand(69012);
for(int snri = 0; snri < NUM_SNR; snri++)
{
float snr = snr_array[snri];
sigma = 1.0f/sqrt(2.0f*rate*pow(10.0f,(snr/10.0f)));
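// AWGN noise standard deviation for rate-R BPSK at this SNR: sigma = 1/sqrt(2*rate*10^(SNR/10))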
total_codeword = 0;
total_frame_error = 0;
total_bit_error = 0;
// Adjust MIN_CODEWORD in LDPC.h to reduce simulation time
while ( (total_frame_error <= MIN_FER) && (total_codeword <= MIN_CODEWORD))
{
total_codeword += CW * MCW;
for(int i = 0; i < CW * MCW; i++)
{
// generate random data
info_gen (info_bin, rand());
// encode the data
structure_encode (info_bin, codeword, h_base);
// BPSK modulation
modulation (codeword, trans);
// additive white Gaussian noise
awgn (trans, recv, rand());
// LLR init
llr_init (llr, recv);
// copy the info_bin and llr to the total memory
memcpy(info_bin_gpu + i * INFO_LEN, info_bin, memorySize_infobits);
memcpy(llr_gpu + i * CODEWORD_LEN, llr, memorySize_llr);
}
// Define kernel dimension
dim3 dimGridKernel1(BLK_ROW, MCW, 1); // grid dimensions: one thread block per (block row, macro-codeword)
dim3 dimBlockKernel1(BLOCK_SIZE_X, CW, 1);
int sharedRCacheSize = THREADS_PER_BLOCK * NON_EMPTY_ELMENT * sizeof(float);
dim3 dimGridKernel2(BLK_COL, MCW, 1);
dim3 dimBlockKernel2(BLOCK_SIZE_X, CW, 1);
//int sharedDtCacheSize = THREADS_PER_BLOCK * NON_EMPTY_ELMENT_VNP * sizeof(float);
// run the kernel
float total_time = 0.f;
for(int j = 0; j < MAX_SIM; j++)
{
// Transfer LLR data into device.
hipMemcpy(dev_llr, llr_gpu, memorySize_llr_gpu, hipMemcpyHostToDevice);
// kernel launch
hipDeviceSynchronize();
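// Ensure the LLR transfer and any prior work have completed before starting the timer.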
auto start = std::chrono::steady_clock::now();
for(int ii = 0; ii < MAX_ITERATION; ii++)
{
// run check-node processing kernel
// TODO: run a special kernel the first iteration?
if(ii == 0) {
hipLaunchKernelGGL(ldpc_cnp_kernel_1st_iter, dimGridKernel1, dimBlockKernel1, 0, 0, dev_llr,
dev_dt,
dev_R,
dev_h_element_count1,
dev_h_compact1);
} else {
hipLaunchKernelGGL(ldpc_cnp_kernel, dimGridKernel1, dimBlockKernel1, sharedRCacheSize, 0, dev_llr,
dev_dt,
dev_R,
dev_h_element_count1,
dev_h_compact1);
}
// run variable-node processing kernel
// for the last iteration we run a special
// kernel. this is because we can make a hard
// decision instead of writing back the belief
// for the value of each bit.
if(ii < MAX_ITERATION - 1) {
hipLaunchKernelGGL(ldpc_vnp_kernel_normal, dimGridKernel2, dimBlockKernel2, 0, 0, dev_llr,
dev_dt,
dev_h_element_count2,
dev_h_compact2);
} else {
hipLaunchKernelGGL(ldpc_vnp_kernel_last_iter, dimGridKernel2, dimBlockKernel2, 0, 0, dev_llr,
dev_dt,
dev_hard_decision,
dev_h_element_count2,
dev_h_compact2);
}
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
total_time += time;
// copy the decoded data from device to host
hipMemcpy(hard_decision_gpu,
dev_hard_decision,
memorySize_hard_decision_gpu,
hipMemcpyDeviceToHost);
this_error = error_check(info_bin_gpu, hard_decision_gpu);
total_bit_error += this_error.bit_error;
total_frame_error += this_error.frame_error;
} // end of MAX-SIM
printf ("\n");
printf ("Total kernel execution time: %f (s)\n", total_time * 1e-9f);
printf ("# codewords = %d, CW=%d, MCW=%d\n",total_codeword, CW, MCW);
printf ("total bit error = %d\n", total_bit_error);
printf ("total frame error = %d\n", total_frame_error);
printf ("BER = %1.2e, FER = %1.2e\n",
(float) total_bit_error/total_codeword/INFO_LEN,
(float) total_frame_error/total_codeword);
} // end of the MAX frame error.
}// end of the snr loop
hipFree(dev_llr);
hipFree(dev_dt);
hipFree(dev_R);
hipFree(dev_hard_decision);
hipFree(dev_h_compact1);
hipFree(dev_h_compact2);
hipFree(dev_h_element_count1);
hipFree(dev_h_element_count2);
free(info_bin);
free(codeword);
free(trans);
free(recv);
free(llr);
free(llr_gpu);
free(hard_decision_gpu);
free(info_bin_gpu);
return 0;
}
| 881bedb0e05cc18fc437dc76d6f3dc33fdf28cc9.cu | /* Copyright (c) 2011-2016, Robert Wang, email: robertwgh (at) gmail.com
All rights reserved. https://github.com/robertwgh/cuLDPC
Implementation of LDPC decoding algorithm.
The details of implementation can be found from the following papers:
1. Wang, G., Wu, M., Sun, Y., & Cavallaro, J. R. (2011, June). A massively parallel implementation of QC-LDPC decoder on GPU. In Application Specific Processors (SASP), 2011 IEEE 9th Symposium on (pp. 82-85). IEEE.
2. Wang, G., Wu, M., Yin, B., & Cavallaro, J. R. (2013, December). High throughput low latency LDPC decoding on GPU for SDR systems. In Global Conference on Signal and Information Processing (GlobalSIP), 2013 IEEE (pp. 1258-1261). IEEE.
The current release is close to the GlobalSIP2013 paper.
*/
#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <math.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include "LDPC.h"
#include "matrix.h"
#include "kernel.cu"
float sigma ;
int *info_bin ;
int main()
{
printf("GPU LDPC Decoder\r\nComputing...\r\n");
// For cnp kernel
#if MODE == WIMAX
const char h_element_count1[BLK_ROW] = {6, 7, 7, 6, 6, 7, 6, 6, 7, 6, 6, 6};
const char h_element_count2[BLK_COL] = {3, 3, 6, 3, 3, 6, 3, 6, 3, 6, 3, 6, \
3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2};
#else
const char h_element_count1[BLK_ROW] = {7, 7, 7, 7, 7, 7, 8, 7, 7, 7, 7, 8};
const char h_element_count2[BLK_COL] = {11,4, 3, 3,11, 3, 3, 3,11, 3, 3, 3, \
3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2};
#endif
h_element h_compact1 [H_COMPACT1_COL][H_COMPACT1_ROW]; // for update dt, R
h_element h_element_temp;
// init the compact matrix
for(int i = 0; i < H_COMPACT1_COL; i++)
{
for(int j = 0; j < H_COMPACT1_ROW; j ++)
{
h_element_temp.x = 0;
h_element_temp.y = 0;
h_element_temp.value = -1;
h_element_temp.valid = 0;
h_compact1[i][j] = h_element_temp; // h[i][0-11], the same column
}
}
// scan the h matrix and generate the compact representation of h
for(int i = 0; i < BLK_ROW; i++)
{
int k = 0;
for(int j = 0; j < BLK_COL; j ++)
{
if(h_base[i][j] != -1)
{
h_element_temp.x = i;
h_element_temp.y = j;
h_element_temp.value = h_base[i][j];
h_element_temp.valid = 1;
h_compact1[k][i] = h_element_temp;
k++;
}
}
// printf("row %d, #element=%d\n", i, k);
}
// h_compact2
h_element h_compact2 [H_COMPACT2_ROW][H_COMPACT2_COL]; // for update llr
// init the compact matrix
for(int i = 0; i < H_COMPACT2_ROW; i++)
{
for(int j = 0; j < H_COMPACT2_COL; j ++)
{
h_element_temp.x = 0;
h_element_temp.y = 0;
h_element_temp.value = -1;
h_element_temp.valid = 0;
h_compact2[i][j] = h_element_temp;
}
}
for(int j = 0; j < BLK_COL; j++)
{
int k = 0;
for(int i = 0; i < BLK_ROW; i ++)
{
if(h_base[i][j] != -1)
{
// although h is transposed, the (x,y) is still (iBlkRow, iBlkCol)
h_element_temp.x = i;
h_element_temp.y = j;
h_element_temp.value = h_base[i][j];
h_element_temp.valid = 1;
h_compact2[k][j] = h_element_temp;
k++;
}
}
}
//int memorySize_h_base = BLK_ROW * BLK_COL * sizeof(int);
int memorySize_h_compact1 = H_COMPACT1_ROW * H_COMPACT1_COL * sizeof(h_element);
int memorySize_h_compact2 = H_COMPACT2_ROW * H_COMPACT2_COL * sizeof(h_element);
int memorySize_infobits = INFO_LEN * sizeof(int);
int memorySize_codeword = CODEWORD_LEN * sizeof(int);
int memorySize_llr = CODEWORD_LEN * sizeof(float);
info_bin = (int *) malloc(memorySize_infobits) ;
int *codeword = (int *) malloc(memorySize_codeword) ;
float *trans = (float *) malloc(memorySize_llr) ;
float *recv = (float *) malloc(memorySize_llr) ;
float *llr = (float *) malloc(memorySize_llr) ;
float rate = (float)0.5f;
//////////////////////////////////////////////////////////////////////////////////
// all the variables ending in _gpu are used on the host for the batched GPU computation
int memorySize_infobits_gpu = MCW * CW * memorySize_infobits ;
int memorySize_llr_gpu = MCW * CW * CODEWORD_LEN * sizeof(float);
int memorySize_dt_gpu = MCW * CW * ROW * BLK_COL * sizeof(float);
int memorySize_R_gpu = MCW * CW * ROW * BLK_COL * sizeof(float);
int memorySize_hard_decision_gpu = MCW * CW * CODEWORD_LEN * sizeof(int);
int *info_bin_gpu;
float *llr_gpu;
int * hard_decision_gpu;
info_bin_gpu = (int *) malloc(memorySize_infobits_gpu);
hard_decision_gpu = (int *) malloc(memorySize_hard_decision_gpu);
llr_gpu = (float *) malloc(memorySize_llr_gpu);
error_result this_error;
int total_frame_error = 0;
int total_bit_error = 0;
int total_codeword = 0;
// create device memory
float * dev_llr;
float * dev_dt;
float * dev_R;
int * dev_hard_decision;
h_element * dev_h_compact1;
h_element * dev_h_compact2;
char * dev_h_element_count1;
char * dev_h_element_count2;
hipMalloc((void **)&dev_llr, memorySize_llr_gpu);
hipMalloc((void **)&dev_dt, memorySize_dt_gpu);
hipMalloc((void **)&dev_R, memorySize_R_gpu);
hipMalloc((void **)&dev_hard_decision, memorySize_hard_decision_gpu);
hipMalloc((void **)&dev_h_compact1, memorySize_h_compact1);
hipMalloc((void **)&dev_h_compact2, memorySize_h_compact2);
hipMalloc((void **)&dev_h_element_count1, BLK_ROW);
hipMalloc((void **)&dev_h_element_count2, BLK_COL);
hipMemcpy(dev_h_element_count1, h_element_count1, BLK_ROW, hipMemcpyHostToDevice);
hipMemcpy(dev_h_element_count2, h_element_count2, BLK_COL, hipMemcpyHostToDevice);
hipMemcpy(dev_h_compact1, h_compact1, memorySize_h_compact1, hipMemcpyHostToDevice);
hipMemcpy(dev_h_compact2, h_compact2, memorySize_h_compact2, hipMemcpyHostToDevice);
srand(69012);
for(int snri = 0; snri < NUM_SNR; snri++)
{
float snr = snr_array[snri];
sigma = 1.0f/sqrt(2.0f*rate*pow(10.0f,(snr/10.0f)));
total_codeword = 0;
total_frame_error = 0;
total_bit_error = 0;
// Adjust MIN_CODEWORD in LDPC.h to reduce simulation time
while ( (total_frame_error <= MIN_FER) && (total_codeword <= MIN_CODEWORD))
{
total_codeword += CW * MCW;
for(int i = 0; i < CW * MCW; i++)
{
// generate random data
info_gen (info_bin, rand());
// encode the data
structure_encode (info_bin, codeword, h_base);
// BPSK modulation
modulation (codeword, trans);
// additive white Gaussian noise
awgn (trans, recv, rand());
// LLR init
llr_init (llr, recv);
// copy the info_bin and llr to the total memory
memcpy(info_bin_gpu + i * INFO_LEN, info_bin, memorySize_infobits);
memcpy(llr_gpu + i * CODEWORD_LEN, llr, memorySize_llr);
}
// Define kernel dimension
dim3 dimGridKernel1(BLK_ROW, MCW, 1); // grid dimensions: one thread block per (block row, macro-codeword)
dim3 dimBlockKernel1(BLOCK_SIZE_X, CW, 1);
int sharedRCacheSize = THREADS_PER_BLOCK * NON_EMPTY_ELMENT * sizeof(float);
dim3 dimGridKernel2(BLK_COL, MCW, 1);
dim3 dimBlockKernel2(BLOCK_SIZE_X, CW, 1);
//int sharedDtCacheSize = THREADS_PER_BLOCK * NON_EMPTY_ELMENT_VNP * sizeof(float);
// run the kernel
float total_time = 0.f;
for(int j = 0; j < MAX_SIM; j++)
{
// Transfer LLR data into device.
hipMemcpy(dev_llr, llr_gpu, memorySize_llr_gpu, hipMemcpyHostToDevice);
// kernel launch
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for(int ii = 0; ii < MAX_ITERATION; ii++)
{
// run check-node processing kernel
// TODO: run a special kernel the first iteration?
if(ii == 0) {
hipLaunchKernelGGL(ldpc_cnp_kernel_1st_iter, dimGridKernel1, dimBlockKernel1, 0, 0, dev_llr,
dev_dt,
dev_R,
dev_h_element_count1,
dev_h_compact1);
} else {
hipLaunchKernelGGL(ldpc_cnp_kernel, dimGridKernel1, dimBlockKernel1, sharedRCacheSize, 0, dev_llr,
dev_dt,
dev_R,
dev_h_element_count1,
dev_h_compact1);
}
// run variable-node processing kernel
// for the last iteration we run a special
// kernel. this is because we can make a hard
// decision instead of writing back the belief
// for the value of each bit.
if(ii < MAX_ITERATION - 1) {
hipLaunchKernelGGL(ldpc_vnp_kernel_normal, dimGridKernel2, dimBlockKernel2, 0, 0, dev_llr,
dev_dt,
dev_h_element_count2,
dev_h_compact2);
} else {
hipLaunchKernelGGL(ldpc_vnp_kernel_last_iter, dimGridKernel2, dimBlockKernel2, 0, 0, dev_llr,
dev_dt,
dev_hard_decision,
dev_h_element_count2,
dev_h_compact2);
}
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
total_time += time;
// copy the decoded data from device to host
hipMemcpy(hard_decision_gpu,
dev_hard_decision,
memorySize_hard_decision_gpu,
hipMemcpyDeviceToHost);
this_error = error_check(info_bin_gpu, hard_decision_gpu);
total_bit_error += this_error.bit_error;
total_frame_error += this_error.frame_error;
} // end of MAX-SIM
printf ("\n");
printf ("Total kernel execution time: %f (s)\n", total_time * 1e-9f);
printf ("# codewords = %d, CW=%d, MCW=%d\n",total_codeword, CW, MCW);
printf ("total bit error = %d\n", total_bit_error);
printf ("total frame error = %d\n", total_frame_error);
printf ("BER = %1.2e, FER = %1.2e\n",
(float) total_bit_error/total_codeword/INFO_LEN,
(float) total_frame_error/total_codeword);
} // end of the MAX frame error.
}// end of the snr loop
hipFree(dev_llr);
hipFree(dev_dt);
hipFree(dev_R);
hipFree(dev_hard_decision);
hipFree(dev_h_compact1);
hipFree(dev_h_compact2);
hipFree(dev_h_element_count1);
hipFree(dev_h_element_count2);
free(info_bin);
free(codeword);
free(trans);
free(recv);
free(llr);
free(llr_gpu);
free(hard_decision_gpu);
free(info_bin_gpu);
return 0;
}
|
4861471e2863c4376ecbceac0cf14704e8872353.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
// find the (x,y) coordinate of the pixel processed by this thread
size_t x = blockIdx.x * blockDim.x + threadIdx.x;
size_t y = blockIdx.y * blockDim.y + threadIdx.y;
// check index is within the image and process the image
if (x < numCols && y < numRows) {
// get the RGB values of the color image
uchar4 rgba = rgbaImage[y * numCols + x];
// compute the grey scale
float grey = 0.299f * rgba.x + 0.587f * rgba.y + 0.114f * rgba.z;
// set the grey scale of the pixel in the grey image
greyImage[y * numCols + x] = grey;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const size_t blockSide = 20; // number of threads in 1-d of a block
const dim3 blockSize(blockSide, blockSide, 1); //TODO
const dim3 gridSize( (numCols+blockSide-1)/blockSide, (numRows+blockSide-1)/blockSide, 1); //TODO
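// Ceiling division: enough blocks to cover images whose dimensions are not multiples of blockSide.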
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 4861471e2863c4376ecbceac0cf14704e8872353.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
// find the (x,y) coordinate of the pixel processed by this thread
size_t x = blockIdx.x * blockDim.x + threadIdx.x;
size_t y = blockIdx.y * blockDim.y + threadIdx.y;
// check index is within the image and process the image
if (x < numCols && y < numRows) {
// get the RGB values of the color image
uchar4 rgba = rgbaImage[y * numCols + x];
// compute the grey scale
float grey = 0.299f * rgba.x + 0.587f * rgba.y + 0.114f * rgba.z;
// set the grey scale of the pixel in the grey image
greyImage[y * numCols + x] = grey;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const size_t blockSide = 20; // number of threads in 1-d of a block
const dim3 blockSize(blockSide, blockSide, 1); //TODO
const dim3 gridSize( (numCols+blockSide-1)/blockSide, (numRows+blockSide-1)/blockSide, 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
c9062b98c5797e135679f1edd2c1cf3e0f80c780.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <iostream>
#include <vector>
#include "opencv2/cudalegacy/NCV.hpp"
//===================================================================
//
// Operations with rectangles
//
//===================================================================
const Ncv32u NUMTHREADS_DRAWRECTS = 32;
const Ncv32u NUMTHREADS_DRAWRECTS_LOG2 = 5;
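// Each rectangle is drawn by 4 consecutive blocks: blockId >> 2 selects the rectangle,
// bit 0 (bVertical) picks a vertical or a horizontal edge, and bit 1 (bTopLeft) picks the
// left/top or the right/bottom one; the block's threads then walk that edge in
// NUMTHREADS_DRAWRECTS-sized chunks.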
template <class T>
__global__ void drawRects(T *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
T color)
{
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
if (blockId > numRects * 4)
{
return;
}
NcvRect32u curRect = d_rects[blockId >> 2];
NcvBool bVertical = blockId & 0x1;
NcvBool bTopLeft = blockId & 0x2;
Ncv32u pt0x, pt0y;
if (bVertical)
{
Ncv32u numChunks = (curRect.height + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
pt0x = bTopLeft ? curRect.x : curRect.x + curRect.width - 1;
pt0y = curRect.y;
if (pt0x < dstWidth)
{
for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
{
Ncv32u ptY = pt0y + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
if (ptY < pt0y + curRect.height && ptY < dstHeight)
{
d_dst[ptY * dstStride + pt0x] = color;
}
}
}
}
else
{
Ncv32u numChunks = (curRect.width + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
pt0x = curRect.x;
pt0y = bTopLeft ? curRect.y : curRect.y + curRect.height - 1;
if (pt0y < dstHeight)
{
for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
{
Ncv32u ptX = pt0x + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
if (ptX < pt0x + curRect.width && ptX < dstWidth)
{
d_dst[pt0y * dstStride + ptX] = color;
}
}
}
}
}
template <class T>
static NCVStatus drawRectsWrapperDevice(T *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
T color,
hipStream_t cuStream)
{
(void)cuStream;
ncvAssertReturn(d_dst != NULL && d_rects != NULL, NCV_NULL_PTR);
ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);
if (numRects == 0)
{
return NCV_SUCCESS;
}
dim3 grid(numRects * 4);
dim3 block(NUMTHREADS_DRAWRECTS);
if (grid.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
}
hipLaunchKernelGGL(( drawRects<T>), dim3(grid), dim3(block), 0, 0, d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
return NCV_SUCCESS;
}
NCVStatus ncvDrawRects_8u_device(Ncv8u *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
Ncv8u color,
hipStream_t cuStream)
{
return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream);
}
NCVStatus ncvDrawRects_32u_device(Ncv32u *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
Ncv32u color,
hipStream_t cuStream)
{
return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream);
}
| c9062b98c5797e135679f1edd2c1cf3e0f80c780.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <iostream>
#include <vector>
#include "opencv2/cudalegacy/NCV.hpp"
//===================================================================
//
// Operations with rectangles
//
//===================================================================
const Ncv32u NUMTHREADS_DRAWRECTS = 32;
const Ncv32u NUMTHREADS_DRAWRECTS_LOG2 = 5;
template <class T>
__global__ void drawRects(T *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
T color)
{
Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
if (blockId > numRects * 4)
{
return;
}
NcvRect32u curRect = d_rects[blockId >> 2];
NcvBool bVertical = blockId & 0x1;
NcvBool bTopLeft = blockId & 0x2;
Ncv32u pt0x, pt0y;
if (bVertical)
{
Ncv32u numChunks = (curRect.height + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
pt0x = bTopLeft ? curRect.x : curRect.x + curRect.width - 1;
pt0y = curRect.y;
if (pt0x < dstWidth)
{
for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
{
Ncv32u ptY = pt0y + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
if (ptY < pt0y + curRect.height && ptY < dstHeight)
{
d_dst[ptY * dstStride + pt0x] = color;
}
}
}
}
else
{
Ncv32u numChunks = (curRect.width + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
pt0x = curRect.x;
pt0y = bTopLeft ? curRect.y : curRect.y + curRect.height - 1;
if (pt0y < dstHeight)
{
for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
{
Ncv32u ptX = pt0x + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
if (ptX < pt0x + curRect.width && ptX < dstWidth)
{
d_dst[pt0y * dstStride + ptX] = color;
}
}
}
}
}
template <class T>
static NCVStatus drawRectsWrapperDevice(T *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
T color,
cudaStream_t cuStream)
{
(void)cuStream;
ncvAssertReturn(d_dst != NULL && d_rects != NULL, NCV_NULL_PTR);
ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);
if (numRects == 0)
{
return NCV_SUCCESS;
}
dim3 grid(numRects * 4);
dim3 block(NUMTHREADS_DRAWRECTS);
if (grid.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
}
drawRects<T><<<grid, block>>>(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color);
ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
return NCV_SUCCESS;
}
NCVStatus ncvDrawRects_8u_device(Ncv8u *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
Ncv8u color,
cudaStream_t cuStream)
{
return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream);
}
NCVStatus ncvDrawRects_32u_device(Ncv32u *d_dst,
Ncv32u dstStride,
Ncv32u dstWidth,
Ncv32u dstHeight,
NcvRect32u *d_rects,
Ncv32u numRects,
Ncv32u color,
cudaStream_t cuStream)
{
return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream);
}
|
2afb2bab7add726c9b6a2aa18054ed636619e8b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/unary/cast.h"
#include <hip/hip_fp16.h>
#include <utility>
#include <map>
template <typename T>
struct ViaTypeMap {
typedef T ViaT;
};
template <>
struct ViaTypeMap<half> {
typedef float ViaT;
};
template <typename InT, typename OutT>
__device__ __inline__ OutT ppl_scalar_cast(const InT &a)
{
const bool any_float16 = std::is_same<half, InT>::value || std::is_same<half, OutT>::value;
typedef typename std::conditional<any_float16, half, OutT>::type T;
typedef typename ViaTypeMap<T>::ViaT ViaT;
return (OutT)((ViaT)a);
}
template <typename InT, typename OutT>
__global__ void ppl_cukernel_cast_any(
const uint64_t num_elems,
const void *input,
void *output)
{
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
uint64_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems)
return;
const InT *input_ptr = static_cast<const InT *>(input);
InT in_val = input_ptr[index];
OutT *output_ptr = static_cast<OutT *>(output);
output_ptr[index] = ppl_scalar_cast<InT, OutT>(in_val);
#endif
}
#define INSERT_CAST_FUNC(SrcTyName, DstTyName, SrcT, DstT) \
func_map.insert({DataTypePair(SrcTyName, DstTyName), ppl_cukernel_cast_any<SrcT, DstT>});
#define INSERT_CAST_FUNC2(SrcTyName, SrcT) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_FLOAT16, SrcT, half) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_FLOAT32, SrcT, float) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_FLOAT64, SrcT, double) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_INT8, SrcT, int8_t) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_INT16, SrcT, int16_t) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_INT32, SrcT, int32_t) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_INT64, SrcT, int64_t) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_UINT8, SrcT, uint8_t) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_UINT16, SrcT, uint16_t) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_UINT32, SrcT, uint32_t) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_UINT64, SrcT, uint64_t) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_BOOL, SrcT, bool)
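// For illustration: a single INSERT_CAST_FUNC2(ppl::common::DATATYPE_FLOAT32, float) below
// registers ppl_cukernel_cast_any<float, DstT> for each of the 12 destination types, keyed by
// (DATATYPE_FLOAT32, <dst type>), so the 12 invocations together populate func_map with
// 12 x 12 source/destination combinations.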
ppl::common::RetCode PPLCUDACastForwardImp(
hipStream_t stream,
const ppl::nn::TensorShape *input_shape,
const void *input,
const ppl::nn::TensorShape *output_shape,
void *output,
int to_)
{
uint64_t num_elems = output_shape->GetElementsIncludingPadding();
int channels = output_shape->GetDim(1);
int pad_channels = output_shape->GetDim(1) + output_shape->GetPadding1(1);
int height = output_shape->GetDim(2);
int width = output_shape->GetDim(3);
int block_size = 256;
uint64_t grid_size = (num_elems + block_size - 1) / block_size;
const ppl::common::datatype_t in_t = input_shape->GetDataType();
const ppl::common::datatype_t out_t = output_shape->GetDataType();
typedef void (*FuncType)(const uint64_t, const void *, void *);
typedef std::pair<ppl::common::datatype_t, ppl::common::datatype_t> DataTypePair;
std::map<DataTypePair, FuncType> func_map;
INSERT_CAST_FUNC2(ppl::common::DATATYPE_FLOAT16, half)
INSERT_CAST_FUNC2(ppl::common::DATATYPE_FLOAT32, float)
INSERT_CAST_FUNC2(ppl::common::DATATYPE_FLOAT64, double)
INSERT_CAST_FUNC2(ppl::common::DATATYPE_INT8, int8_t)
INSERT_CAST_FUNC2(ppl::common::DATATYPE_INT16, int16_t)
INSERT_CAST_FUNC2(ppl::common::DATATYPE_INT32, int32_t)
INSERT_CAST_FUNC2(ppl::common::DATATYPE_INT64, int64_t)
INSERT_CAST_FUNC2(ppl::common::DATATYPE_UINT8, uint8_t)
INSERT_CAST_FUNC2(ppl::common::DATATYPE_UINT16, uint16_t)
INSERT_CAST_FUNC2(ppl::common::DATATYPE_UINT32, uint32_t)
INSERT_CAST_FUNC2(ppl::common::DATATYPE_UINT64, uint64_t)
INSERT_CAST_FUNC2(ppl::common::DATATYPE_BOOL, bool)
hipLaunchKernelGGL((func_map[DataTypePair(in_t, out_t)]), dim3(grid_size), dim3(block_size), 0, stream,
num_elems, (const void *)input, (void *)output);
return ppl::common::RC_SUCCESS;
} | 2afb2bab7add726c9b6a2aa18054ed636619e8b6.cu | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/unary/cast.h"
#include <cuda_fp16.h>
#include <utility>
#include <map>
template <typename T>
struct ViaTypeMap {
typedef T ViaT;
};
template <>
struct ViaTypeMap<half> {
typedef float ViaT;
};
template <typename InT, typename OutT>
__device__ __inline__ OutT ppl_scalar_cast(const InT &a)
{
const bool any_float16 = std::is_same<half, InT>::value || std::is_same<half, OutT>::value;
typedef typename std::conditional<any_float16, half, OutT>::type T;
typedef typename ViaTypeMap<T>::ViaT ViaT;
return (OutT)((ViaT)a);
}
template <typename InT, typename OutT>
__global__ void ppl_cukernel_cast_any(
const uint64_t num_elems,
const void *input,
void *output)
{
#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9
uint64_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_elems)
return;
const InT *input_ptr = static_cast<const InT *>(input);
InT in_val = input_ptr[index];
OutT *output_ptr = static_cast<OutT *>(output);
output_ptr[index] = ppl_scalar_cast<InT, OutT>(in_val);
#endif
}
#define INSERT_CAST_FUNC(SrcTyName, DstTyName, SrcT, DstT) \
func_map.insert({DataTypePair(SrcTyName, DstTyName), ppl_cukernel_cast_any<SrcT, DstT>});
#define INSERT_CAST_FUNC2(SrcTyName, SrcT) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_FLOAT16, SrcT, half) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_FLOAT32, SrcT, float) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_FLOAT64, SrcT, double) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_INT8, SrcT, int8_t) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_INT16, SrcT, int16_t) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_INT32, SrcT, int32_t) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_INT64, SrcT, int64_t) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_UINT8, SrcT, uint8_t) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_UINT16, SrcT, uint16_t) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_UINT32, SrcT, uint32_t) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_UINT64, SrcT, uint64_t) \
INSERT_CAST_FUNC(SrcTyName, ppl::common::DATATYPE_BOOL, SrcT, bool)
ppl::common::RetCode PPLCUDACastForwardImp(
cudaStream_t stream,
const ppl::nn::TensorShape *input_shape,
const void *input,
const ppl::nn::TensorShape *output_shape,
void *output,
int to_)
{
uint64_t num_elems = output_shape->GetElementsIncludingPadding();
int channels = output_shape->GetDim(1);
int pad_channels = output_shape->GetDim(1) + output_shape->GetPadding1(1);
int height = output_shape->GetDim(2);
int width = output_shape->GetDim(3);
int block_size = 256;
uint64_t grid_size = (num_elems + block_size - 1) / block_size;
const ppl::common::datatype_t in_t = input_shape->GetDataType();
const ppl::common::datatype_t out_t = output_shape->GetDataType();
typedef void (*FuncType)(const uint64_t, const void *, void *);
typedef std::pair<ppl::common::datatype_t, ppl::common::datatype_t> DataTypePair;
std::map<DataTypePair, FuncType> func_map;
INSERT_CAST_FUNC2(ppl::common::DATATYPE_FLOAT16, half)
INSERT_CAST_FUNC2(ppl::common::DATATYPE_FLOAT32, float)
INSERT_CAST_FUNC2(ppl::common::DATATYPE_FLOAT64, double)
INSERT_CAST_FUNC2(ppl::common::DATATYPE_INT8, int8_t)
INSERT_CAST_FUNC2(ppl::common::DATATYPE_INT16, int16_t)
INSERT_CAST_FUNC2(ppl::common::DATATYPE_INT32, int32_t)
INSERT_CAST_FUNC2(ppl::common::DATATYPE_INT64, int64_t)
INSERT_CAST_FUNC2(ppl::common::DATATYPE_UINT8, uint8_t)
INSERT_CAST_FUNC2(ppl::common::DATATYPE_UINT16, uint16_t)
INSERT_CAST_FUNC2(ppl::common::DATATYPE_UINT32, uint32_t)
INSERT_CAST_FUNC2(ppl::common::DATATYPE_UINT64, uint64_t)
INSERT_CAST_FUNC2(ppl::common::DATATYPE_BOOL, bool)
func_map[DataTypePair(in_t, out_t)]<<<grid_size, block_size, 0, stream>>>(
num_elems, (const void *)input, (void *)output);
return ppl::common::RC_SUCCESS;
} |
9c165808bcebea2396d64628d1cfe99ef1492d4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "CudaObject.h"
namespace gpu_cuda {
__global__ void calcSigmoidForwardGPU(float *in, float *out, int elements)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if( id < elements ){
float v = in[id];
v = 1.0f / (1.0f + exp( -v )); // sigmoid
out[id] = v;
}
/* original
for ( int i = 0; i < in_total_size; ++i ){
out.data[i] = activator_function(in.data[i]);
}
*/
}
__global__ void calcSigmoidBackwardGPU( float *dz_next_layer, float *dz_in, float *dz, float *in, int elements )
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if( id < elements ){
float x = dz_in[id] += dz_next_layer[id];
float sig = 1.0f / (1.0f + exp( -x ));
dz[id] += ( sig * (1 - sig) ) * dz_in[id]; // sigmoid_derivative * dz_in
}
/* original
for( int i = 0; i < dz_in.size.b * dz_in.size.x * dz_in.size.y * dz_in.size.z; ++i ){
dz_in.data[i] += dz_next_layer.data[i];
}
for ( int i = 0; i < in_total_size; ++i ){
dz.data[i] += activator_derivative( in.data[i] ) * dz_in.data[i];
}
*/
}
void sigmoidForwardGPU(float *in, float *out, int N)
{
CudaObject cuda = CudaObject();
dim3 grid = cuda.cudaGridSize(N);
hipLaunchKernelGGL(( calcSigmoidForwardGPU), dim3(grid), dim3(BLOCK), 0, 0, in, out, N);
}
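/* Minimal host-side usage sketch (illustrative only; d_in, d_out, h_in, h_out and N are
assumed names, not part of this file):
float *d_in, *d_out;
hipMalloc(&d_in, N * sizeof(float));
hipMalloc(&d_out, N * sizeof(float));
hipMemcpy(d_in, h_in, N * sizeof(float), hipMemcpyHostToDevice);
gpu_cuda::sigmoidForwardGPU(d_in, d_out, N);
hipMemcpy(h_out, d_out, N * sizeof(float), hipMemcpyDeviceToHost);
*/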
void sigmoidBackwardGPU( float *dz_next_layer, float *dz_in, float *dz, float *in, int N )
{
CudaObject cuda = CudaObject();
dim3 grid = cuda.cudaGridSize(N);
hipLaunchKernelGGL(( calcSigmoidBackwardGPU), dim3(grid), dim3(BLOCK), 0, 0, dz_next_layer, dz_in, dz, in, N );
}
} // namespace gpu
| 9c165808bcebea2396d64628d1cfe99ef1492d4c.cu | #include <stdio.h>
#include "CudaObject.h"
namespace gpu_cuda {
__global__ void calcSigmoidForwardGPU(float *in, float *out, int elements)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if( id < elements ){
float v = in[id];
v = 1.0f / (1.0f + exp( -v )); // sigmoid
out[id] = v;
}
/* original
for ( int i = 0; i < in_total_size; ++i ){
out.data[i] = activator_function(in.data[i]);
}
*/
}
__global__ void calcSigmoidBackwardGPU( float *dz_next_layer, float *dz_in, float *dz, float *in, int elements )
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if( id < elements ){
float x = dz_in[id] += dz_next_layer[id];
float sig = 1.0f / (1.0f + exp( -x ));
dz[id] += ( sig * (1 - sig) ) * dz_in[id]; // sigmoid_derivative * dz_in
}
/* original
for( int i = 0; i < dz_in.size.b * dz_in.size.x * dz_in.size.y * dz_in.size.z; ++i ){
dz_in.data[i] += dz_next_layer.data[i];
}
for ( int i = 0; i < in_total_size; ++i ){
dz.data[i] += activator_derivative( in.data[i] ) * dz_in.data[i];
}
*/
}
void sigmoidForwardGPU(float *in, float *out, int N)
{
CudaObject cuda = CudaObject();
dim3 grid = cuda.cudaGridSize(N);
calcSigmoidForwardGPU<<<grid, BLOCK>>>(in, out, N);
}
void sigmoidBackwardGPU( float *dz_next_layer, float *dz_in, float *dz, float *in, int N )
{
CudaObject cuda = CudaObject();
dim3 grid = cuda.cudaGridSize(N);
calcSigmoidBackwardGPU<<<grid, BLOCK>>>( dz_next_layer, dz_in, dz, in, N );
}
} // namespace gpu
|
bde7b246e0fbfc84e220d1ff5b7eb37340a06972.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <aev.h>
#include <torch/extension.h>
#include <cuaev_cub.cuh>
#include <ATen/Context.h>
#include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h>
#include <c10/hip/HIPException.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>
#include <vector>
#define PI 3.141592653589793
using torch::Tensor;
// fetch from the following matrix
// [[ 0, 1, 2, 3, 4],
// [ 1, 5, 6, 7, 8],
// [ 2, 6, 9, 10, 11],
// [ 3, 7, 10, 12, 13],
// [ 4, 8, 11, 13, 14]]
constexpr int csubaev_offsets(int i, int j, int n) {
int larger = ::max(i, j);
int smaller = ::min(i, j);
int starting = smaller * (2 * n - smaller + 1) / 2; // n + (n - 1) + ... + (n - smaller + 1)
int offset = larger - smaller;
return starting + offset;
}
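// e.g. with num_species = 5, csubaev_offsets(1, 2, 5) = 1*(2*5-1+1)/2 + (2-1) = 5 + 1 = 6,
// matching row 1, column 2 of the matrix above (the function is symmetric in i and j).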
template <typename DataT>
struct PairDist {
DataT Rij;
int midx;
short i;
short j;
};
// used to group Rijs by atom id
template <typename DataT>
__host__ __device__ bool operator==(const PairDist<DataT>& lhs, const PairDist<DataT>& rhs) {
return lhs.midx == rhs.midx && lhs.i == rhs.i;
}
/// Alignment of memory. Must be a power of two
/// \tparam boundary Boundary to align to (NOTE: must be power of 2)
/// \param value Input value that is to be aligned
/// \return Value aligned to boundary
template <int32_t boundary>
__host__ __device__ __forceinline__ int align(const int& value) {
static_assert((boundary & (boundary - 1)) == 0, "Boundary for align must be power of 2");
return (value + boundary) & ~(boundary - 1);
}
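// e.g. align<4>(10) == 12 and align<4>(13) == 16; note that exact multiples are also padded
// up (align<4>(8) == 12) because the boundary is added unconditionally before masking.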
template <typename SpeciesT, typename DataT, typename IndexT = int>
__global__ void pairwiseDistance(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t,
PairDist<DataT>* d_Rij,
IndexT max_natoms_per_mol) {
extern __shared__ DataT spos[];
DataT* sx = &spos[0];
DataT* sy = &spos[max_natoms_per_mol];
DataT* sz = &spos[2 * max_natoms_per_mol];
int mol_idx = blockIdx.x;
int tidx = threadIdx.y * blockDim.x + threadIdx.x;
for (int i = tidx; i < max_natoms_per_mol; i += blockDim.x * blockDim.y) {
sx[i] = pos_t[mol_idx][i][0];
sy[i] = pos_t[mol_idx][i][1];
sz[i] = pos_t[mol_idx][i][2];
}
__syncthreads();
int natom_pairs = max_natoms_per_mol * max_natoms_per_mol;
for (int i = threadIdx.y; i < max_natoms_per_mol; i += blockDim.y) {
SpeciesT type_i = species_t[mol_idx][i];
DataT xi = sx[i];
DataT yi = sy[i];
DataT zi = sz[i];
for (int j = threadIdx.x; j < max_natoms_per_mol; j += blockDim.x) {
SpeciesT type_j = species_t[mol_idx][j];
const DataT xj = sx[j];
const DataT yj = sy[j];
const DataT zj = sz[j];
const DataT delx = xj - xi;
const DataT dely = yj - yi;
const DataT delz = zj - zi;
const DataT Rsq = delx * delx + dely * dely + delz * delz;
if (type_i != -1 && type_j != -1 && i != j) {
DataT Rij = sqrt(Rsq);
PairDist<DataT> d;
d.Rij = Rij;
d.midx = mol_idx;
d.i = i;
d.j = j;
d_Rij[mol_idx * natom_pairs + i * max_natoms_per_mol + j] = d;
}
}
}
}
template <typename SpeciesT, typename DataT, typename IndexT = int>
__global__ void pairwiseDistanceSingleMolecule(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t,
PairDist<DataT>* d_Rij,
IndexT max_natoms_per_mol) {
constexpr int mol_idx = 0;
int natom_pairs = max_natoms_per_mol * max_natoms_per_mol;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= max_natoms_per_mol || j >= max_natoms_per_mol)
return;
SpeciesT type_i = species_t[mol_idx][i];
DataT xi = pos_t[mol_idx][i][0];
DataT yi = pos_t[mol_idx][i][1];
DataT zi = pos_t[mol_idx][i][2];
SpeciesT type_j = species_t[mol_idx][j];
DataT xj = pos_t[mol_idx][j][0];
DataT yj = pos_t[mol_idx][j][1];
DataT zj = pos_t[mol_idx][j][2];
DataT delx = xj - xi;
DataT dely = yj - yi;
DataT delz = zj - zi;
DataT Rsq = delx * delx + dely * dely + delz * delz;
if (type_i != -1 && type_j != -1 && i != j) {
DataT Rij = sqrt(Rsq);
PairDist<DataT> d;
d.Rij = Rij;
d.midx = mol_idx;
d.i = i;
d.j = j;
d_Rij[mol_idx * natom_pairs + i * max_natoms_per_mol + j] = d;
}
}
// each block computes the gradients of blockDim.x Rij entries, laid out column-major, to reduce atomicAdd contention
template <bool is_double_backward, typename DataT, typename IndexT = int>
__global__ void pairwiseDistance_backward_or_doublebackward(
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits>
grad_dist, // ddist for backward, dddist for double backward
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits>
grad_coord_or_force, // dcoord for backward, dforce(i.e. ddcoord) for double backward
const PairDist<DataT>* d_radialRij,
IndexT nRadialRij) {
int gidx = threadIdx.x * gridDim.x + blockIdx.x;
if (gidx >= nRadialRij)
return;
PairDist<DataT> d = d_radialRij[gidx];
DataT Rij = d.Rij;
int mol_idx = d.midx;
int i = d.i;
int j = d.j;
const DataT delx = pos_t[mol_idx][j][0] - pos_t[mol_idx][i][0];
const DataT dely = pos_t[mol_idx][j][1] - pos_t[mol_idx][i][1];
const DataT delz = pos_t[mol_idx][j][2] - pos_t[mol_idx][i][2];
if (is_double_backward) {
auto& grad_force = grad_coord_or_force;
DataT grad_force_coord_Rij_item = (grad_force[mol_idx][j][0] - grad_force[mol_idx][i][0]) * delx / Rij +
(grad_force[mol_idx][j][1] - grad_force[mol_idx][i][1]) * dely / Rij +
(grad_force[mol_idx][j][2] - grad_force[mol_idx][i][2]) * delz / Rij;
grad_dist[gidx] = grad_force_coord_Rij_item;
} else {
auto& grad_coord = grad_coord_or_force;
DataT grad_dist_coord_x = delx / Rij;
DataT grad_dist_coord_y = dely / Rij;
DataT grad_dist_coord_z = delz / Rij;
DataT grad_radial_dist_item = grad_dist[gidx];
atomicAdd(&grad_coord[mol_idx][j][0], grad_radial_dist_item * grad_dist_coord_x);
atomicAdd(&grad_coord[mol_idx][j][1], grad_radial_dist_item * grad_dist_coord_y);
atomicAdd(&grad_coord[mol_idx][j][2], grad_radial_dist_item * grad_dist_coord_z);
atomicAdd(&grad_coord[mol_idx][i][0], -grad_radial_dist_item * grad_dist_coord_x);
atomicAdd(&grad_coord[mol_idx][i][1], -grad_radial_dist_item * grad_dist_coord_y);
atomicAdd(&grad_coord[mol_idx][i][2], -grad_radial_dist_item * grad_dist_coord_z);
}
}
template <typename SpeciesT, typename DataT, typename IndexT = int, int TILEX = 8, int TILEY = 4>
__global__ void cuAngularAEVs(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfA_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfZ_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaA_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> Zeta_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> aev_t,
PairDist<DataT>* d_Rij,
PairDist<DataT>* d_centralAtom,
int* d_nPairsPerCenterAtom,
int* d_centerAtomStartIdx,
float Rca,
int angular_length,
int angular_sublength,
int radial_length,
int num_species,
int maxnbrs_per_atom_aligned,
int angular_length_aligned,
int ncentral_atoms) {
extern __shared__ DataT smem[];
constexpr int threads_per_catom = TILEX * TILEY;
static_assert(threads_per_catom == C10_WARP_SIZE);
int gIdx = blockIdx.x * blockDim.x + threadIdx.x;
int cIdx = gIdx / threads_per_catom; // central atom id
if (cIdx >= ncentral_atoms)
return;
int groupIdx = threadIdx.x / threads_per_catom;
int laneIdx = threadIdx.x % threads_per_catom;
int ncatom_per_tpb = blockDim.x / threads_per_catom;
DataT* saev = &smem[groupIdx * angular_length_aligned];
int offset = ncatom_per_tpb * angular_length_aligned;
DataT* sdx = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdy = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdz = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdist = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sfc = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
int* stype = (int*)&smem[offset + groupIdx * maxnbrs_per_atom_aligned];
DataT EtaA = EtaA_t[0];
DataT Zeta = Zeta_t[0];
IndexT nShfA = ShfA_t.size(0);
IndexT nShfZ = ShfZ_t.size(0);
PairDist<DataT> d = d_centralAtom[cIdx];
int start_idx = d_centerAtomStartIdx[cIdx];
int jnum = d_nPairsPerCenterAtom[cIdx];
// center atom
int i = d.i;
int mol_idx = d.midx;
for (int iaev = laneIdx; iaev < angular_length; iaev += threads_per_catom) {
saev[iaev] = 0;
}
DataT xi = pos_t[mol_idx][i][0];
DataT yi = pos_t[mol_idx][i][1];
DataT zi = pos_t[mol_idx][i][2];
for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) {
PairDist<DataT> dij = d_Rij[start_idx + jj];
int j = dij.j;
DataT Rij = dij.Rij;
SpeciesT type_j = species_t[mol_idx][j];
sdx[jj] = pos_t[mol_idx][j][0] - xi;
sdy[jj] = pos_t[mol_idx][j][1] - yi;
sdz[jj] = pos_t[mol_idx][j][2] - zi;
stype[jj] = type_j;
sdist[jj] = Rij;
DataT fc_ij = 0.5 * cos(PI * Rij / Rca) + 0.5;
sfc[jj] = fc_ij;
}
short2 tile = make_short2(laneIdx % TILEX, laneIdx / TILEX);
// must sync if threads_per_catom != 32 (warp size) to make sure shared data is ready
// __syncthreads
for (int jj = 0; jj < jnum; jj++) {
const DataT Rij = sdist[jj];
SpeciesT type_j = stype[jj];
DataT fc_ij = sfc[jj];
for (int kk_start = jj + 1; kk_start < jnum; kk_start += threads_per_catom) {
int kk = kk_start + laneIdx;
DataT theta = 0;
if (kk < jnum) {
const DataT Rik = sdist[kk];
theta = acos(0.95 * (sdx[jj] * sdx[kk] + sdy[jj] * sdy[kk] + sdz[jj] * sdz[kk]) / (Rij * Rik));
}
for (int srcLane = 0; srcLane < C10_WARP_SIZE && (kk_start + srcLane) < jnum; ++srcLane) {
int kk = kk_start + srcLane;
DataT theta_ijk = __shfl_sync(0xFFFFFFFF, theta, srcLane);
const DataT Rik = sdist[kk];
SpeciesT type_k = stype[kk];
DataT fc_ik = sfc[kk];
DataT Rijk = (Rij + Rik) / 2;
DataT fc_ijk = fc_ij * fc_ik;
IndexT subaev_offset = angular_sublength * csubaev_offsets(type_j, type_k, num_species);
for (int itheta = tile.x; itheta < nShfZ; itheta += TILEX) {
DataT ShfZ = ShfZ_t[itheta];
DataT factor1 = pow((1 + cos(theta_ijk - ShfZ)) / 2, Zeta);
for (int ishfr = tile.y; ishfr < nShfA; ishfr += TILEY) {
DataT ShfA = ShfA_t[ishfr];
DataT factor2 = exp(-EtaA * (Rijk - ShfA) * (Rijk - ShfA));
DataT res = 2 * factor1 * factor2 * fc_ijk;
saev[subaev_offset + ishfr * nShfZ + itheta] += res;
}
}
}
}
}
for (int iaev = laneIdx; iaev < angular_length; iaev += threads_per_catom) {
aev_t[mol_idx][i][radial_length + iaev] = saev[iaev];
}
}
template <
bool is_double_backward,
typename SpeciesT,
typename DataT,
typename IndexT = int,
int TILEX = 8,
int TILEY = 4>
__global__ void cuAngularAEVs_backward_or_doublebackward(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfA_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfZ_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaA_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> Zeta_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits>
grad_output, // for backward, this is daev, for double backward, this is dforce (i.e. ddcoord)
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits>
grad_input, // for backward, this is dcoord, for double backward, this is ddaev
const PairDist<DataT>* d_Rij,
const PairDist<DataT>* d_centralAtom,
int* d_nPairsPerCenterAtom,
int* d_centerAtomStartIdx,
float Rca,
int angular_length,
int angular_sublength,
int radial_length,
int num_species,
int maxnbrs_per_atom_aligned,
int angular_length_aligned,
int ncentral_atoms) {
extern __shared__ DataT smem[];
constexpr int threads_per_catom = TILEX * TILEY;
static_assert(threads_per_catom == C10_WARP_SIZE);
int gIdx = blockIdx.x * blockDim.x + threadIdx.x;
int cIdx = gIdx / threads_per_catom; // central atom id
if (cIdx >= ncentral_atoms)
return;
int groupIdx = threadIdx.x / threads_per_catom;
int laneIdx = threadIdx.x % threads_per_catom;
int ncatom_per_tpb = blockDim.x / threads_per_catom; // e.g. 2 catom per block
DataT* sdx = &smem[groupIdx * maxnbrs_per_atom_aligned];
int offset = ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdy = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdz = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdjx_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdjy_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdjz_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdist = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sfc = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sfc_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
int* stype = (int*)&smem[offset + groupIdx * maxnbrs_per_atom_aligned];
DataT EtaA = EtaA_t[0];
DataT Zeta = Zeta_t[0];
IndexT nShfA = ShfA_t.size(0);
IndexT nShfZ = ShfZ_t.size(0);
PairDist<DataT> d = d_centralAtom[cIdx];
int start_idx = d_centerAtomStartIdx[cIdx];
int jnum = d_nPairsPerCenterAtom[cIdx];
// center atom
int i = d.i;
int mol_idx = d.midx;
DataT xi = pos_t[mol_idx][i][0];
DataT yi = pos_t[mol_idx][i][1];
DataT zi = pos_t[mol_idx][i][2];
for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) {
PairDist<DataT> dij = d_Rij[start_idx + jj];
int j = dij.j;
DataT Rij = dij.Rij;
SpeciesT type_j = species_t[mol_idx][j];
sdx[jj] = pos_t[mol_idx][j][0] - xi;
sdy[jj] = pos_t[mol_idx][j][1] - yi;
sdz[jj] = pos_t[mol_idx][j][2] - zi;
stype[jj] = type_j;
sdist[jj] = Rij;
// cutoff
DataT fc_ij = 0.5 * cos(PI * Rij / Rca) + 0.5;
DataT fc_ij_grad = -0.5 * (PI / Rca) * sin(PI * Rij / Rca);
sfc[jj] = fc_ij;
sfc_grad[jj] = fc_ij_grad;
}
// grad init
DataT sdix_grad = 0;
DataT sdiy_grad = 0;
DataT sdiz_grad = 0;
for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) {
sdjx_grad[jj] = 0;
sdjy_grad[jj] = 0;
sdjz_grad[jj] = 0;
}
short2 tile = make_short2(laneIdx % TILEX, laneIdx / TILEX);
const DataT tc = 0.95; // scale factor that keeps the acos argument strictly inside (-1, 1)
// must sync if threads_per_catom != 32 (warp size) to make sure shared data is ready
// __syncthreads
for (int jj = 0; jj < jnum; jj++) {
const DataT Rij = sdist[jj];
SpeciesT type_j = stype[jj];
DataT fc_ij = sfc[jj];
DataT grad_fc_ij = sfc_grad[jj];
for (int kk_start = jj + 1; kk_start < jnum; kk_start += threads_per_catom) {
int kk = kk_start + laneIdx;
DataT theta = 0;
DataT grad_theta_vij_x = 0;
DataT grad_theta_vij_y = 0;
DataT grad_theta_vij_z = 0;
DataT grad_theta_vik_x = 0;
DataT grad_theta_vik_y = 0;
DataT grad_theta_vik_z = 0;
if (kk < jnum) {
const DataT Rik = sdist[kk];
DataT vij_vik_dot = sdx[jj] * sdx[kk] + sdy[jj] * sdy[kk] + sdz[jj] * sdz[kk];
theta = acos(tc * vij_vik_dot / (Rij * Rik));
// grad
DataT vij_factor =
tc / (Rij * Rij * Rij * sqrt(-tc * tc * vij_vik_dot * vij_vik_dot / (Rij * Rij) + Rik * Rik));
DataT vik_factor = tc /
(Rik * Rik * Rik *
sqrt(-tc * tc * vij_vik_dot * vij_vik_dot / (Rik * Rik) + Rij * Rij)); // tricky 80ms improved
grad_theta_vij_x = vij_factor * (sdx[jj] * vij_vik_dot - sdx[kk] * Rij * Rij);
grad_theta_vij_y = vij_factor * (sdy[jj] * vij_vik_dot - sdy[kk] * Rij * Rij);
grad_theta_vij_z = vij_factor * (sdz[jj] * vij_vik_dot - sdz[kk] * Rij * Rij);
grad_theta_vik_x = vik_factor * (sdx[kk] * vij_vik_dot - sdx[jj] * Rik * Rik);
grad_theta_vik_y = vik_factor * (sdy[kk] * vij_vik_dot - sdy[jj] * Rik * Rik);
grad_theta_vik_z = vik_factor * (sdz[kk] * vij_vik_dot - sdz[jj] * Rik * Rik);
}
for (int srcLane = 0; srcLane < C10_WARP_SIZE && (kk_start + srcLane) < jnum; ++srcLane) {
int kk = kk_start + srcLane;
DataT theta_ijk = __shfl_sync(0xFFFFFFFF, theta, srcLane);
// TODO necessary?
DataT grad_theta_vij_x_ = __shfl_sync(0xFFFFFFFF, grad_theta_vij_x, srcLane);
DataT grad_theta_vij_y_ = __shfl_sync(0xFFFFFFFF, grad_theta_vij_y, srcLane);
DataT grad_theta_vij_z_ = __shfl_sync(0xFFFFFFFF, grad_theta_vij_z, srcLane);
DataT grad_theta_vik_x_ = __shfl_sync(0xFFFFFFFF, grad_theta_vik_x, srcLane);
DataT grad_theta_vik_y_ = __shfl_sync(0xFFFFFFFF, grad_theta_vik_y, srcLane);
DataT grad_theta_vik_z_ = __shfl_sync(0xFFFFFFFF, grad_theta_vik_z, srcLane);
const DataT Rik = sdist[kk];
SpeciesT type_k = stype[kk];
DataT fc_ik = sfc[kk];
DataT grad_fc_ik = sfc_grad[kk];
DataT Rijk = (Rij + Rik) / 2;
DataT fc_ijk = fc_ij * fc_ik;
IndexT subaev_offset = angular_sublength * csubaev_offsets(type_j, type_k, num_species);
float3 grad_vij = make_float3(0.f, 0.f, 0.f);
float3 grad_vik = make_float3(0.f, 0.f, 0.f);
for (int itheta = tile.x; itheta < nShfZ; itheta += TILEX) {
DataT ShfZ = ShfZ_t[itheta];
DataT factor1 = pow((1 + cos(theta_ijk - ShfZ)) / 2, Zeta);
DataT grad_factor1_theta = 1.0 / 2.0 * Zeta * pow((1 + cos(ShfZ - theta_ijk)) / 2, Zeta - 1) *
sin(ShfZ - theta_ijk); // tricky 100ms improved
for (int ishfr = tile.y; ishfr < nShfA; ishfr += TILEY) {
DataT ShfA = ShfA_t[ishfr];
DataT factor2 = exp(-EtaA * (Rijk - ShfA) * (Rijk - ShfA));
DataT grad_factor2_dist = -EtaA * (Rijk - ShfA) * factor2;
DataT grad_vij_x = 2 *
(grad_factor1_theta * grad_theta_vij_x_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdx[jj] / Rij * fc_ijk +
factor1 * factor2 * fc_ik * grad_fc_ij * sdx[jj] / Rij);
DataT grad_vij_y = 2 *
(grad_factor1_theta * grad_theta_vij_y_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdy[jj] / Rij * fc_ijk +
factor1 * factor2 * fc_ik * grad_fc_ij * sdy[jj] / Rij);
DataT grad_vij_z = 2 *
(grad_factor1_theta * grad_theta_vij_z_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdz[jj] / Rij * fc_ijk +
factor1 * factor2 * fc_ik * grad_fc_ij * sdz[jj] / Rij);
DataT grad_vik_x = 2 *
(grad_factor1_theta * grad_theta_vik_x_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdx[kk] / Rik * fc_ijk +
factor1 * factor2 * fc_ij * grad_fc_ik * sdx[kk] / Rik);
DataT grad_vik_y = 2 *
(grad_factor1_theta * grad_theta_vik_y_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdy[kk] / Rik * fc_ijk +
factor1 * factor2 * fc_ij * grad_fc_ik * sdy[kk] / Rik);
DataT grad_vik_z = 2 *
(grad_factor1_theta * grad_theta_vik_z_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdz[kk] / Rik * fc_ijk +
factor1 * factor2 * fc_ij * grad_fc_ik * sdz[kk] / Rik);
if (is_double_backward) {
int atomj_idx = d_Rij[start_idx + jj].j;
int atomk_idx = d_Rij[start_idx + kk].j;
auto& grad_force = grad_output;
auto& grad_grad_aev = grad_input;
grad_vij_x *= (grad_force[mol_idx][atomj_idx][0] - grad_force[mol_idx][i][0]);
grad_vij_y *= (grad_force[mol_idx][atomj_idx][1] - grad_force[mol_idx][i][1]);
grad_vij_z *= (grad_force[mol_idx][atomj_idx][2] - grad_force[mol_idx][i][2]);
grad_vik_x *= (grad_force[mol_idx][atomk_idx][0] - grad_force[mol_idx][i][0]);
grad_vik_y *= (grad_force[mol_idx][atomk_idx][1] - grad_force[mol_idx][i][1]);
grad_vik_z *= (grad_force[mol_idx][atomk_idx][2] - grad_force[mol_idx][i][2]);
atomicAdd(
&grad_grad_aev[mol_idx][i][radial_length + subaev_offset + ishfr * nShfZ + itheta],
grad_vij_x + grad_vij_y + grad_vij_z + grad_vik_x + grad_vik_y + grad_vik_z);
} else {
DataT grad_output_item = grad_output[mol_idx][i][radial_length + subaev_offset + ishfr * nShfZ + itheta];
grad_vij_x *= grad_output_item;
grad_vij_y *= grad_output_item;
grad_vij_z *= grad_output_item;
grad_vik_x *= grad_output_item;
grad_vik_y *= grad_output_item;
grad_vik_z *= grad_output_item;
grad_vij.x += grad_vij_x;
grad_vij.y += grad_vij_y;
grad_vij.z += grad_vij_z;
grad_vik.x += grad_vik_x;
grad_vik.y += grad_vik_y;
grad_vik.z += grad_vik_z;
}
}
}
if (!is_double_backward) {
sdix_grad += (-grad_vij.x - grad_vik.x);
sdiy_grad += (-grad_vij.y - grad_vik.y);
sdiz_grad += (-grad_vij.z - grad_vik.z);
for (int offset = 16; offset > 0; offset /= 2) {
grad_vij.x += __shfl_down_sync(0xFFFFFFFF, grad_vij.x, offset);
grad_vij.y += __shfl_down_sync(0xFFFFFFFF, grad_vij.y, offset);
grad_vij.z += __shfl_down_sync(0xFFFFFFFF, grad_vij.z, offset);
grad_vik.x += __shfl_down_sync(0xFFFFFFFF, grad_vik.x, offset);
grad_vik.y += __shfl_down_sync(0xFFFFFFFF, grad_vik.y, offset);
grad_vik.z += __shfl_down_sync(0xFFFFFFFF, grad_vik.z, offset);
}
if (laneIdx == 0) {
sdjx_grad[jj] += grad_vij.x;
sdjy_grad[jj] += grad_vij.y;
sdjz_grad[jj] += grad_vij.z;
sdjx_grad[kk] += grad_vik.x;
sdjy_grad[kk] += grad_vik.y;
sdjz_grad[kk] += grad_vik.z;
}
}
}
}
}
if (!is_double_backward) {
auto& grad_coord = grad_input;
int atomi_idx = i;
atomicAdd(&grad_coord[mol_idx][atomi_idx][0], sdix_grad);
atomicAdd(&grad_coord[mol_idx][atomi_idx][1], sdiy_grad);
atomicAdd(&grad_coord[mol_idx][atomi_idx][2], sdiz_grad);
for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) {
int atomj_idx = d_Rij[start_idx + jj].j;
atomicAdd(&grad_coord[mol_idx][atomj_idx][0], sdjx_grad[jj]);
atomicAdd(&grad_coord[mol_idx][atomj_idx][1], sdjy_grad[jj]);
atomicAdd(&grad_coord[mol_idx][atomj_idx][2], sdjz_grad[jj]);
}
}
}
template <typename SpeciesT, typename DataT, int THREADS_PER_RIJ>
__global__ void cuRadialAEVs(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfR_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaR_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> aev_t,
PairDist<DataT>* d_Rij,
float Rcr,
int radial_length,
int radial_sublength,
int nRadialRij) {
int gidx = blockIdx.x * blockDim.x + threadIdx.x;
int idx = gidx / THREADS_PER_RIJ;
int nShfR = ShfR_t.size(0);
DataT EtaR = EtaR_t[0];
if (idx >= nRadialRij)
return;
int laneIdx = threadIdx.x % THREADS_PER_RIJ;
PairDist<DataT> d = d_Rij[idx];
DataT Rij = d.Rij;
int mol_idx = d.midx;
int i = d.i;
int j = d.j;
SpeciesT type_j = species_t[mol_idx][j];
DataT fc = 0.5 * cos(PI * Rij / Rcr) + 0.5;
for (int ishfr = laneIdx; ishfr < nShfR; ishfr += THREADS_PER_RIJ) {
DataT ShfR = ShfR_t[ishfr];
DataT GmR = 0.25 * exp(-EtaR * (Rij - ShfR) * (Rij - ShfR)) * fc;
atomicAdd(&aev_t[mol_idx][i][type_j * radial_sublength + ishfr], GmR);
}
}
// every <THREADS_PER_RIJ> threads take care of 1 RIJ, and iterate <nShfR / THREADS_PER_RIJ> times
template <bool is_double_backward, typename SpeciesT, typename DataT, int THREADS_PER_RIJ>
__global__ void cuRadialAEVs_backward_or_doublebackward(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfR_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaR_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits>
grad_aev, // daev for backward, ddaev for double backward
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits>
grad_dist, // ddist for backward, dddist for double backward
const PairDist<DataT>* d_Rij,
float Rcr,
int radial_length,
int radial_sublength,
int nRadialRij) {
int gidx = blockIdx.x * blockDim.x + threadIdx.x;
int idx = gidx / THREADS_PER_RIJ;
int nShfR = ShfR_t.size(0);
DataT EtaR = EtaR_t[0];
if (idx >= nRadialRij)
return;
int laneIdx = threadIdx.x % THREADS_PER_RIJ;
PairDist<DataT> d = d_Rij[idx];
DataT Rij = d.Rij;
int mol_idx = d.midx;
int i = d.i;
int j = d.j;
SpeciesT type_j = species_t[mol_idx][j];
DataT fc = 0.5 * cos(PI * Rij / Rcr) + 0.5;
DataT fc_grad = -0.5 * (PI / Rcr) * sin(PI * Rij / Rcr);
DataT upstream_grad;
if (is_double_backward) {
upstream_grad = grad_dist[idx];
}
for (int ishfr = laneIdx; ishfr < nShfR; ishfr += THREADS_PER_RIJ) {
DataT ShfR = ShfR_t[ishfr];
DataT GmR = 0.25 * exp(-EtaR * (Rij - ShfR) * (Rij - ShfR));
DataT GmR_grad = -EtaR * (-2 * ShfR + 2 * Rij) * GmR;
DataT jacobian = GmR_grad * fc + GmR * fc_grad;
if (is_double_backward) {
atomicAdd(&grad_aev[mol_idx][i][type_j * radial_sublength + ishfr], upstream_grad * jacobian);
} else {
upstream_grad = grad_aev[mol_idx][i][type_j * radial_sublength + ishfr];
atomicAdd(&grad_dist[idx], upstream_grad * jacobian);
}
}
}
// NOTE: assumes size of EtaA_t = Zeta_t = EtaR_t = 1
Result cuaev_forward(const Tensor& coordinates_t, const Tensor& species_t, const AEVScalarParams& aev_params) {
TORCH_CHECK(
(species_t.dtype() == torch::kInt32) && (coordinates_t.dtype() == torch::kFloat32), "Unsupported input type");
TORCH_CHECK(
aev_params.EtaR_t.size(0) == 1 && aev_params.EtaA_t.size(0) == 1 && aev_params.Zeta_t.size(0) == 1,
"cuda extension is currently not supported for the specified "
"configuration");
TORCH_CHECK(
coordinates_t.device() == species_t.device() && coordinates_t.device() == aev_params.EtaR_t.device() &&
coordinates_t.device() == aev_params.EtaA_t.device(),
"coordinates, species, and aev_params should be on the same device");
float Rcr = aev_params.Rcr;
float Rca = aev_params.Rca;
const int n_molecules = species_t.size(0);
const int max_natoms_per_mol = species_t.size(1);
int aev_length = aev_params.radial_length + aev_params.angular_length;
auto aev_t = torch::zeros({n_molecules, max_natoms_per_mol, aev_length}, coordinates_t.options());
if (species_t.numel() == 0) {
return {
aev_t, Tensor(), Tensor(), Tensor(), 0, 0, 0, Tensor(), Tensor(), Tensor(), 0, 0, 0, coordinates_t, species_t};
}
at::hip::HIPGuardMasqueradingAsCUDA device_guard(coordinates_t.device().index());
at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto& allocator = *c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
// buffer to store all the pairwise distances (Rij)
auto total_natom_pairs = n_molecules * max_natoms_per_mol * max_natoms_per_mol;
auto d_options = torch::dtype(torch::kUInt8).device(coordinates_t.device());
float inf = std::numeric_limits<float>::infinity();
Tensor tensor_Rij =
torch::full(sizeof(PairDist<float>) / sizeof(float) * total_natom_pairs, inf, d_options.dtype(torch::kFloat32));
PairDist<float>* d_Rij = (PairDist<float>*)tensor_Rij.data_ptr();
// buffer to store all the pairwise distances that are needed for Radial AEV
// computation
Tensor tensor_radialRij = torch::empty(sizeof(PairDist<float>) * total_natom_pairs, d_options);
PairDist<float>* d_radialRij = (PairDist<float>*)tensor_radialRij.data_ptr();
auto buffer_count = allocator.allocate(sizeof(int));
int* d_count_out = (int*)buffer_count.get();
const int block_size = 64;
if (n_molecules == 1) {
int tileWidth = 32;
int tilesPerRow = (max_natoms_per_mol + tileWidth - 1) / tileWidth;
dim3 block(tileWidth, tileWidth, 1);
dim3 grid(tilesPerRow, tilesPerRow, 1);
hipLaunchKernelGGL(( pairwiseDistanceSingleMolecule), dim3(grid), dim3(block), 0, stream,
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_Rij,
max_natoms_per_mol);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
dim3 block(8, 8, 1);
// Compute pairwise distance (Rij) for all atom pairs in a molecule
// maximum 4096 atoms, which needs 49152 bytes (48 KB) of shared memory
// TODO: the kernel is not optimized for batched huge molecule (max_natoms_per_mol > 1000)
hipLaunchKernelGGL(( pairwiseDistance), dim3(n_molecules), dim3(block), sizeof(float) * max_natoms_per_mol * 3, stream,
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_Rij,
max_natoms_per_mol);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
// Extract Rijs that are needed for RadialAEV computation, i.e. all the Rij <= Rcr
int nRadialRij = cubDeviceSelect(
d_Rij,
d_radialRij,
total_natom_pairs,
d_count_out,
[=] __device__(const PairDist<float> d) { return d.Rij <= Rcr; },
stream);
int nblocks = (nRadialRij * 8 + block_size - 1) / block_size;
hipLaunchKernelGGL(( cuRadialAEVs<int, float, 8>), dim3(nblocks), dim3(block_size), 0, stream,
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
aev_params.ShfR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_radialRij,
aev_params.Rcr,
aev_params.radial_length,
aev_params.radial_sublength,
nRadialRij);
C10_HIP_KERNEL_LAUNCH_CHECK();
// reuse buffer allocated for all Rij
// d_angularRij will store all the Rij required in Angular AEV computation
Tensor tensor_angularRij = torch::empty(sizeof(PairDist<float>) * nRadialRij, d_options);
PairDist<float>* d_angularRij = (PairDist<float>*)tensor_angularRij.data_ptr();
// Extract Rijs that are needed for AngularAEV computation, i.e. all the Rij <= Rca
int nAngularRij = cubDeviceSelect(
d_radialRij,
d_angularRij,
nRadialRij,
d_count_out,
[=] __device__(const PairDist<float> d) { return d.Rij <= Rca; },
stream);
Tensor tensor_centralAtom = torch::empty(sizeof(PairDist<float>) * nAngularRij, d_options);
PairDist<float>* d_centralAtom = (PairDist<float>*)tensor_centralAtom.data_ptr();
Tensor tensor_numPairsPerCenterAtom = torch::empty(sizeof(int) * nAngularRij, d_options);
int* d_numPairsPerCenterAtom = (int*)tensor_numPairsPerCenterAtom.data_ptr();
// group by center atom
int ncenter_atoms = cubEncode(d_angularRij, d_centralAtom, d_numPairsPerCenterAtom, nAngularRij, d_count_out, stream);
Tensor tensor_centerAtomStartIdx = torch::empty(sizeof(int) * ncenter_atoms, d_options);
int* d_centerAtomStartIdx = (int*)tensor_centerAtomStartIdx.data_ptr();
cubScan(d_numPairsPerCenterAtom, d_centerAtomStartIdx, ncenter_atoms, stream);
{
const int nthreads_per_catom = 32;
const int nblocks_angAEV = (ncenter_atoms * nthreads_per_catom + block_size - 1) / block_size;
auto smem_size = [&aev_params](int max_nbrs, int ncatom_per_tpb) {
int sm_aev = sizeof(float) * align<4>(aev_params.angular_length); // (angular_length / 4 + 1) * 4
int sxyz = sizeof(float) * max_nbrs * 3;
int sRij = sizeof(float) * max_nbrs;
int sfc = sizeof(float) * max_nbrs;
int sj = sizeof(int) * max_nbrs;
return (sm_aev + sxyz + sRij + sfc + sj) * ncatom_per_tpb;
};
int maxNbrsPerCenterAtom = cubMax(d_numPairsPerCenterAtom, ncenter_atoms, d_count_out, stream);
int maxnbrs_per_atom_aligned = align<4>(maxNbrsPerCenterAtom);
int smem_size_aligned = smem_size(maxnbrs_per_atom_aligned, block_size / nthreads_per_catom);
int angular_length_aligned = align<4>(aev_params.angular_length);
hipLaunchKernelGGL(( cuAngularAEVs), dim3(nblocks_angAEV), dim3(block_size), smem_size_aligned, stream,
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
aev_params.ShfA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.ShfZ_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.Zeta_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_angularRij,
d_centralAtom,
d_numPairsPerCenterAtom,
d_centerAtomStartIdx,
aev_params.Rca,
aev_params.angular_length,
aev_params.angular_sublength,
aev_params.radial_length,
aev_params.num_species,
maxnbrs_per_atom_aligned,
angular_length_aligned,
ncenter_atoms);
C10_HIP_KERNEL_LAUNCH_CHECK();
return {
aev_t,
tensor_Rij,
tensor_radialRij,
tensor_angularRij,
total_natom_pairs,
nRadialRij,
nAngularRij,
tensor_centralAtom,
tensor_numPairsPerCenterAtom,
tensor_centerAtomStartIdx,
maxnbrs_per_atom_aligned,
angular_length_aligned,
ncenter_atoms,
coordinates_t,
species_t};
}
}
Tensor cuaev_backward(const Tensor& grad_output, const AEVScalarParams& aev_params, const Result& result) {
using namespace torch::indexing;
Tensor coordinates_t = result.coordinates_t;
Tensor species_t = result.species_t;
const int n_molecules = coordinates_t.size(0);
const int max_natoms_per_mol = coordinates_t.size(1);
at::hip::HIPGuardMasqueradingAsCUDA device_guard(coordinates_t.device().index());
at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto grad_coord = torch::zeros(coordinates_t.sizes(), coordinates_t.options().requires_grad(false)); // [2, 5, 3]
PairDist<float>* d_Rij = (PairDist<float>*)result.tensor_Rij.data_ptr();
PairDist<float>* d_radialRij = (PairDist<float>*)result.tensor_radialRij.data_ptr();
PairDist<float>* d_angularRij = (PairDist<float>*)result.tensor_angularRij.data_ptr();
PairDist<float>* d_centralAtom = (PairDist<float>*)result.tensor_centralAtom.data_ptr();
int* d_numPairsPerCenterAtom = (int*)result.tensor_numPairsPerCenterAtom.data_ptr();
int* d_centerAtomStartIdx = (int*)result.tensor_centerAtomStartIdx.data_ptr();
Tensor grad_radial_dist = torch::zeros(result.nRadialRij, coordinates_t.options().requires_grad(false));
int block_size = 64;
int nblocks = (result.nRadialRij * 8 + block_size - 1) / block_size;
hipLaunchKernelGGL(( cuRadialAEVs_backward_or_doublebackward<false, int, float, 8>), dim3(nblocks), dim3(block_size), 0, stream,
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
aev_params.ShfR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_output.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_radial_dist.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
d_radialRij,
aev_params.Rcr,
aev_params.radial_length,
aev_params.radial_sublength,
result.nRadialRij);
C10_HIP_KERNEL_LAUNCH_CHECK();
// For best result, block_size should match average molecule size (no padding) to avoid atomicAdd
nblocks = (result.nRadialRij + block_size - 1) / block_size;
hipLaunchKernelGGL(( pairwiseDistance_backward_or_doublebackward<false>), dim3(nblocks), dim3(block_size), 0, stream,
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_radial_dist.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_coord.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_radialRij,
result.nRadialRij);
C10_HIP_KERNEL_LAUNCH_CHECK();
auto smem_size = [&aev_params](int max_nbrs, int ncatom_per_tpb) {
int sxyz = sizeof(float) * max_nbrs * 3;
int sj_xyz_grad = sizeof(float) * max_nbrs * 3;
int sRij = sizeof(float) * max_nbrs;
int sfc = sizeof(float) * max_nbrs;
int sfc_grad = sizeof(float) * max_nbrs;
int sj = sizeof(int) * max_nbrs;
return (sxyz + sj_xyz_grad + sRij + sfc + sfc_grad + sj) * ncatom_per_tpb;
};
block_size = 32;
const int nthreads_per_catom = 32;
const int nblocks_angAEV = (result.ncenter_atoms * nthreads_per_catom + block_size - 1) / block_size;
int smem_size_aligned = smem_size(result.maxnbrs_per_atom_aligned, block_size / nthreads_per_catom);
Tensor grad_angular_coord = torch::zeros({result.nAngularRij, 3}, coordinates_t.options().requires_grad(false));
hipLaunchKernelGGL(( cuAngularAEVs_backward_or_doublebackward<false>), dim3(nblocks_angAEV), dim3(block_size), smem_size_aligned, stream,
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
aev_params.ShfA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.ShfZ_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.Zeta_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_output.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_coord.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_angularRij,
d_centralAtom,
d_numPairsPerCenterAtom,
d_centerAtomStartIdx,
aev_params.Rca,
aev_params.angular_length,
aev_params.angular_sublength,
aev_params.radial_length,
aev_params.num_species,
result.maxnbrs_per_atom_aligned,
result.angular_length_aligned,
result.ncenter_atoms);
C10_HIP_KERNEL_LAUNCH_CHECK();
return grad_coord;
}
Tensor cuaev_double_backward(const Tensor& grad_force, const AEVScalarParams& aev_params, const Result& result) {
using namespace torch::indexing;
Tensor coordinates_t = result.coordinates_t;
Tensor species_t = result.species_t;
const int n_molecules = coordinates_t.size(0);
const int max_natoms_per_mol = coordinates_t.size(1);
at::hip::HIPGuardMasqueradingAsCUDA device_guard(coordinates_t.device().index());
at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
int aev_length = aev_params.radial_length + aev_params.angular_length;
auto grad_grad_aev = torch::zeros(
{coordinates_t.size(0), coordinates_t.size(1), aev_length},
coordinates_t.options().requires_grad(false)); // [2, 5, 384]
PairDist<float>* d_Rij = (PairDist<float>*)result.tensor_Rij.data_ptr();
PairDist<float>* d_radialRij = (PairDist<float>*)result.tensor_radialRij.data_ptr();
PairDist<float>* d_angularRij = (PairDist<float>*)result.tensor_angularRij.data_ptr();
PairDist<float>* d_centralAtom = (PairDist<float>*)result.tensor_centralAtom.data_ptr();
int* d_numPairsPerCenterAtom = (int*)result.tensor_numPairsPerCenterAtom.data_ptr();
int* d_centerAtomStartIdx = (int*)result.tensor_centerAtomStartIdx.data_ptr();
auto grad_force_coord_Rij = torch::zeros({result.nRadialRij}, coordinates_t.options().requires_grad(false));
int block_size = 64;
int nblocks = (result.nRadialRij + block_size - 1) / block_size;
hipLaunchKernelGGL(( pairwiseDistance_backward_or_doublebackward<true>), dim3(nblocks), dim3(block_size), 0, stream,
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_force_coord_Rij.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_force.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_radialRij,
result.nRadialRij);
C10_HIP_KERNEL_LAUNCH_CHECK();
nblocks = (result.nRadialRij * 8 + block_size - 1) / block_size;
hipLaunchKernelGGL(( cuRadialAEVs_backward_or_doublebackward<true, int, float, 8>), dim3(nblocks), dim3(block_size), 0, stream,
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
aev_params.ShfR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_grad_aev.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_force_coord_Rij.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
d_radialRij,
aev_params.Rcr,
aev_params.radial_length,
aev_params.radial_sublength,
result.nRadialRij);
C10_HIP_KERNEL_LAUNCH_CHECK();
auto smem_size = [&aev_params](int max_nbrs, int ncatom_per_tpb) {
int sxyz = sizeof(float) * max_nbrs * 3;
int sj_xyz_grad = sizeof(float) * max_nbrs * 3;
int sRij = sizeof(float) * max_nbrs;
int sfc = sizeof(float) * max_nbrs;
int sfc_grad = sizeof(float) * max_nbrs;
int sj = sizeof(int) * max_nbrs;
return (sxyz + sj_xyz_grad + sRij + sfc + sfc_grad + sj) * ncatom_per_tpb;
};
block_size = 32;
const int nthreads_per_catom = 32;
const int nblocks_angAEV = (result.ncenter_atoms * nthreads_per_catom + block_size - 1) / block_size;
int smem_size_aligned = smem_size(result.maxnbrs_per_atom_aligned, block_size / nthreads_per_catom);
hipLaunchKernelGGL(( cuAngularAEVs_backward_or_doublebackward<true>), dim3(nblocks_angAEV), dim3(block_size), smem_size_aligned, stream,
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
aev_params.ShfA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.ShfZ_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.Zeta_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_force.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_grad_aev.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_angularRij,
d_centralAtom,
d_numPairsPerCenterAtom,
d_centerAtomStartIdx,
aev_params.Rca,
aev_params.angular_length,
aev_params.angular_sublength,
aev_params.radial_length,
aev_params.num_species,
result.maxnbrs_per_atom_aligned,
result.angular_length_aligned,
result.ncenter_atoms);
C10_HIP_KERNEL_LAUNCH_CHECK();
return grad_grad_aev;
}
| bde7b246e0fbfc84e220d1ff5b7eb37340a06972.cu | #include <aev.h>
#include <torch/extension.h>
#include <cuaev_cub.cuh>
#include <ATen/Context.h>
#include <c10/cuda/CUDACachingAllocator.h>
#include <c10/cuda/CUDAException.h>
#include <c10/cuda/CUDAGuard.h>
#include <c10/cuda/CUDAStream.h>
#include <vector>
#define PI 3.141592653589793
using torch::Tensor;
// fetch from the following matrix
// [[ 0, 1, 2, 3, 4],
// [ 1, 5, 6, 7, 8],
// [ 2, 6, 9, 10, 11],
// [ 3, 7, 10, 12, 13],
// [ 4, 8, 11, 13, 14]]
constexpr int csubaev_offsets(int i, int j, int n) {
int larger = std::max(i, j);
int smaller = std::min(i, j);
int starting = smaller * (2 * n - smaller + 1) / 2; // n + (n - 1) + ... + (n - smaller + 1)
int offset = larger - smaller;
return starting + offset;
}
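// Worked example (for illustration only): with num_species n = 5,
// csubaev_offsets(1, 3, 5) gives starting = 1 * (2 * 5 - 1 + 1) / 2 = 5 and
// offset = 3 - 1 = 2, i.e. index 7, which matches row 1, column 3 of the matrix
// above; using max/min makes the result symmetric in (i, j).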
template <typename DataT>
struct PairDist {
DataT Rij;
int midx;
short i;
short j;
};
// used to group Rijs by atom id
template <typename DataT>
__host__ __device__ bool operator==(const PairDist<DataT>& lhs, const PairDist<DataT>& rhs) {
return lhs.midx == rhs.midx && lhs.i == rhs.i;
}
/// Alignment of memory. Must be a power of two
/// \tparam boundary Boundary to align to (NOTE: must be power of 2)
/// \param value Input value that is to be aligned
/// \return Value aligned to boundary
template <int32_t boundary>
__host__ __device__ __forceinline__ int align(const int& value) {
static_assert((boundary & (boundary - 1)) == 0, "Boundary for align must be power of 2");
return (value + boundary) & ~(boundary - 1);
}
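// For illustration: align<4>(5) == 8 and align<4>(8) == 12 -- values that are
// already multiples of the boundary are still bumped to the next one, because
// `boundary` (rather than `boundary - 1`) is added before masking.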
template <typename SpeciesT, typename DataT, typename IndexT = int>
__global__ void pairwiseDistance(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t,
PairDist<DataT>* d_Rij,
IndexT max_natoms_per_mol) {
extern __shared__ DataT spos[];
DataT* sx = &spos[0];
DataT* sy = &spos[max_natoms_per_mol];
DataT* sz = &spos[2 * max_natoms_per_mol];
int mol_idx = blockIdx.x;
int tidx = threadIdx.y * blockDim.x + threadIdx.x;
for (int i = tidx; i < max_natoms_per_mol; i += blockDim.x * blockDim.y) {
sx[i] = pos_t[mol_idx][i][0];
sy[i] = pos_t[mol_idx][i][1];
sz[i] = pos_t[mol_idx][i][2];
}
__syncthreads();
int natom_pairs = max_natoms_per_mol * max_natoms_per_mol;
for (int i = threadIdx.y; i < max_natoms_per_mol; i += blockDim.y) {
SpeciesT type_i = species_t[mol_idx][i];
DataT xi = sx[i];
DataT yi = sy[i];
DataT zi = sz[i];
for (int j = threadIdx.x; j < max_natoms_per_mol; j += blockDim.x) {
SpeciesT type_j = species_t[mol_idx][j];
const DataT xj = sx[j];
const DataT yj = sy[j];
const DataT zj = sz[j];
const DataT delx = xj - xi;
const DataT dely = yj - yi;
const DataT delz = zj - zi;
const DataT Rsq = delx * delx + dely * dely + delz * delz;
if (type_i != -1 && type_j != -1 && i != j) {
DataT Rij = sqrt(Rsq);
PairDist<DataT> d;
d.Rij = Rij;
d.midx = mol_idx;
d.i = i;
d.j = j;
d_Rij[mol_idx * natom_pairs + i * max_natoms_per_mol + j] = d;
}
}
}
}
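// Summary of the kernel above: one molecule's coordinates are staged in shared
// memory (3 * max_natoms_per_mol floats) and each block writes the full
// max_natoms_per_mol^2 distance tile for its molecule; padded or self pairs
// (type -1 or i == j) are skipped, leaving the +inf sentinel written at
// allocation time so they are filtered out by the later cutoff selection.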
template <typename SpeciesT, typename DataT, typename IndexT = int>
__global__ void pairwiseDistanceSingleMolecule(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t,
PairDist<DataT>* d_Rij,
IndexT max_natoms_per_mol) {
constexpr int mol_idx = 0;
int natom_pairs = max_natoms_per_mol * max_natoms_per_mol;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= max_natoms_per_mol || j >= max_natoms_per_mol)
return;
SpeciesT type_i = species_t[mol_idx][i];
DataT xi = pos_t[mol_idx][i][0];
DataT yi = pos_t[mol_idx][i][1];
DataT zi = pos_t[mol_idx][i][2];
SpeciesT type_j = species_t[mol_idx][j];
DataT xj = pos_t[mol_idx][j][0];
DataT yj = pos_t[mol_idx][j][1];
DataT zj = pos_t[mol_idx][j][2];
DataT delx = xj - xi;
DataT dely = yj - yi;
DataT delz = zj - zi;
DataT Rsq = delx * delx + dely * dely + delz * delz;
if (type_i != -1 && type_j != -1 && i != j) {
DataT Rij = sqrt(Rsq);
PairDist<DataT> d;
d.Rij = Rij;
d.midx = mol_idx;
d.i = i;
d.j = j;
d_Rij[mol_idx * natom_pairs + i * max_natoms_per_mol + j] = d;
}
}
// every block computes block_size Rij gradients in column-major order to reduce atomicAdd contention
template <bool is_double_backward, typename DataT, typename IndexT = int>
__global__ void pairwiseDistance_backward_or_doublebackward(
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits>
grad_dist, // ddist for backward, dddist for double backward
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits>
grad_coord_or_force, // dcoord for backward, dforce(i.e. ddcoord) for double backward
const PairDist<DataT>* d_radialRij,
IndexT nRadialRij) {
int gidx = threadIdx.x * gridDim.x + blockIdx.x;
if (gidx >= nRadialRij)
return;
PairDist<DataT> d = d_radialRij[gidx];
DataT Rij = d.Rij;
int mol_idx = d.midx;
int i = d.i;
int j = d.j;
const DataT delx = pos_t[mol_idx][j][0] - pos_t[mol_idx][i][0];
const DataT dely = pos_t[mol_idx][j][1] - pos_t[mol_idx][i][1];
const DataT delz = pos_t[mol_idx][j][2] - pos_t[mol_idx][i][2];
if (is_double_backward) {
auto& grad_force = grad_coord_or_force;
DataT grad_force_coord_Rij_item = (grad_force[mol_idx][j][0] - grad_force[mol_idx][i][0]) * delx / Rij +
(grad_force[mol_idx][j][1] - grad_force[mol_idx][i][1]) * dely / Rij +
(grad_force[mol_idx][j][2] - grad_force[mol_idx][i][2]) * delz / Rij;
grad_dist[gidx] = grad_force_coord_Rij_item;
} else {
auto& grad_coord = grad_coord_or_force;
DataT grad_dist_coord_x = delx / Rij;
DataT grad_dist_coord_y = dely / Rij;
DataT grad_dist_coord_z = delz / Rij;
DataT grad_radial_dist_item = grad_dist[gidx];
atomicAdd(&grad_coord[mol_idx][j][0], grad_radial_dist_item * grad_dist_coord_x);
atomicAdd(&grad_coord[mol_idx][j][1], grad_radial_dist_item * grad_dist_coord_y);
atomicAdd(&grad_coord[mol_idx][j][2], grad_radial_dist_item * grad_dist_coord_z);
atomicAdd(&grad_coord[mol_idx][i][0], -grad_radial_dist_item * grad_dist_coord_x);
atomicAdd(&grad_coord[mol_idx][i][1], -grad_radial_dist_item * grad_dist_coord_y);
atomicAdd(&grad_coord[mol_idx][i][2], -grad_radial_dist_item * grad_dist_coord_z);
}
}
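// Sketch of the math used above: dRij/dxj = (xj - xi) / Rij (and the negated
// value for atom i), so the backward branch scatters grad_dist * del{x,y,z} / Rij
// into grad_coord, while the double-backward branch contracts the incoming force
// gradient with the same unit vector to produce one scalar per pair.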
template <typename SpeciesT, typename DataT, typename IndexT = int, int TILEX = 8, int TILEY = 4>
__global__ void cuAngularAEVs(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfA_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfZ_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaA_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> Zeta_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> aev_t,
PairDist<DataT>* d_Rij,
PairDist<DataT>* d_centralAtom,
int* d_nPairsPerCenterAtom,
int* d_centerAtomStartIdx,
float Rca,
int angular_length,
int angular_sublength,
int radial_length,
int num_species,
int maxnbrs_per_atom_aligned,
int angular_length_aligned,
int ncentral_atoms) {
extern __shared__ DataT smem[];
constexpr int threads_per_catom = TILEX * TILEY;
static_assert(threads_per_catom == C10_WARP_SIZE);
int gIdx = blockIdx.x * blockDim.x + threadIdx.x;
int cIdx = gIdx / threads_per_catom; // central atom id
if (cIdx >= ncentral_atoms)
return;
int groupIdx = threadIdx.x / threads_per_catom;
int laneIdx = threadIdx.x % threads_per_catom;
int ncatom_per_tpb = blockDim.x / threads_per_catom;
DataT* saev = &smem[groupIdx * angular_length_aligned];
int offset = ncatom_per_tpb * angular_length_aligned;
DataT* sdx = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdy = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdz = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdist = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sfc = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
int* stype = (int*)&smem[offset + groupIdx * maxnbrs_per_atom_aligned];
DataT EtaA = EtaA_t[0];
DataT Zeta = Zeta_t[0];
IndexT nShfA = ShfA_t.size(0);
IndexT nShfZ = ShfZ_t.size(0);
PairDist<DataT> d = d_centralAtom[cIdx];
int start_idx = d_centerAtomStartIdx[cIdx];
int jnum = d_nPairsPerCenterAtom[cIdx];
// center atom
int i = d.i;
int mol_idx = d.midx;
for (int iaev = laneIdx; iaev < angular_length; iaev += threads_per_catom) {
saev[iaev] = 0;
}
DataT xi = pos_t[mol_idx][i][0];
DataT yi = pos_t[mol_idx][i][1];
DataT zi = pos_t[mol_idx][i][2];
for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) {
PairDist<DataT> dij = d_Rij[start_idx + jj];
int j = dij.j;
DataT Rij = dij.Rij;
SpeciesT type_j = species_t[mol_idx][j];
sdx[jj] = pos_t[mol_idx][j][0] - xi;
sdy[jj] = pos_t[mol_idx][j][1] - yi;
sdz[jj] = pos_t[mol_idx][j][2] - zi;
stype[jj] = type_j;
sdist[jj] = Rij;
DataT fc_ij = 0.5 * cos(PI * Rij / Rca) + 0.5;
sfc[jj] = fc_ij;
}
short2 tile = make_short2(laneIdx % TILEX, laneIdx / TILEX);
// must sync if threads_per_catom != 32 (warp size) to make sure shared data is ready
// __syncthreads
for (int jj = 0; jj < jnum; jj++) {
const DataT Rij = sdist[jj];
SpeciesT type_j = stype[jj];
DataT fc_ij = sfc[jj];
for (int kk_start = jj + 1; kk_start < jnum; kk_start += threads_per_catom) {
int kk = kk_start + laneIdx;
DataT theta = 0;
if (kk < jnum) {
const DataT Rik = sdist[kk];
theta = acos(0.95 * (sdx[jj] * sdx[kk] + sdy[jj] * sdy[kk] + sdz[jj] * sdz[kk]) / (Rij * Rik));
}
for (int srcLane = 0; srcLane < C10_WARP_SIZE && (kk_start + srcLane) < jnum; ++srcLane) {
int kk = kk_start + srcLane;
DataT theta_ijk = __shfl_sync(0xFFFFFFFF, theta, srcLane);
const DataT Rik = sdist[kk];
SpeciesT type_k = stype[kk];
DataT fc_ik = sfc[kk];
DataT Rijk = (Rij + Rik) / 2;
DataT fc_ijk = fc_ij * fc_ik;
IndexT subaev_offset = angular_sublength * csubaev_offsets(type_j, type_k, num_species);
for (int itheta = tile.x; itheta < nShfZ; itheta += TILEX) {
DataT ShfZ = ShfZ_t[itheta];
DataT factor1 = pow((1 + cos(theta_ijk - ShfZ)) / 2, Zeta);
for (int ishfr = tile.y; ishfr < nShfA; ishfr += TILEY) {
DataT ShfA = ShfA_t[ishfr];
DataT factor2 = exp(-EtaA * (Rijk - ShfA) * (Rijk - ShfA));
DataT res = 2 * factor1 * factor2 * fc_ijk;
saev[subaev_offset + ishfr * nShfZ + itheta] += res;
}
}
}
}
}
for (int iaev = laneIdx; iaev < angular_length; iaev += threads_per_catom) {
aev_t[mol_idx][i][radial_length + iaev] = saev[iaev];
}
}
template <
bool is_double_backward,
typename SpeciesT,
typename DataT,
typename IndexT = int,
int TILEX = 8,
int TILEY = 4>
__global__ void cuAngularAEVs_backward_or_doublebackward(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> pos_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfA_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfZ_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaA_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> Zeta_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits>
grad_output, // for backward, this is daev, for double backward, this is dforce (i.e. ddcoord)
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits>
grad_input, // for backward, this is dcoord, for double backward, this is ddaev
const PairDist<DataT>* d_Rij,
const PairDist<DataT>* d_centralAtom,
int* d_nPairsPerCenterAtom,
int* d_centerAtomStartIdx,
float Rca,
int angular_length,
int angular_sublength,
int radial_length,
int num_species,
int maxnbrs_per_atom_aligned,
int angular_length_aligned,
int ncentral_atoms) {
extern __shared__ DataT smem[];
constexpr int threads_per_catom = TILEX * TILEY;
static_assert(threads_per_catom == C10_WARP_SIZE);
int gIdx = blockIdx.x * blockDim.x + threadIdx.x;
int cIdx = gIdx / threads_per_catom; // central atom id
if (cIdx >= ncentral_atoms)
return;
int groupIdx = threadIdx.x / threads_per_catom;
int laneIdx = threadIdx.x % threads_per_catom;
int ncatom_per_tpb = blockDim.x / threads_per_catom; // e.g. 2 catom per block
DataT* sdx = &smem[groupIdx * maxnbrs_per_atom_aligned];
int offset = ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdy = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdz = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdjx_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdjy_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdjz_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sdist = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sfc = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
DataT* sfc_grad = &smem[offset + groupIdx * maxnbrs_per_atom_aligned];
offset += ncatom_per_tpb * maxnbrs_per_atom_aligned;
int* stype = (int*)&smem[offset + groupIdx * maxnbrs_per_atom_aligned];
DataT EtaA = EtaA_t[0];
DataT Zeta = Zeta_t[0];
IndexT nShfA = ShfA_t.size(0);
IndexT nShfZ = ShfZ_t.size(0);
PairDist<DataT> d = d_centralAtom[cIdx];
int start_idx = d_centerAtomStartIdx[cIdx];
int jnum = d_nPairsPerCenterAtom[cIdx];
// center atom
int i = d.i;
int mol_idx = d.midx;
DataT xi = pos_t[mol_idx][i][0];
DataT yi = pos_t[mol_idx][i][1];
DataT zi = pos_t[mol_idx][i][2];
for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) {
PairDist<DataT> dij = d_Rij[start_idx + jj];
int j = dij.j;
DataT Rij = dij.Rij;
SpeciesT type_j = species_t[mol_idx][j];
sdx[jj] = pos_t[mol_idx][j][0] - xi;
sdy[jj] = pos_t[mol_idx][j][1] - yi;
sdz[jj] = pos_t[mol_idx][j][2] - zi;
stype[jj] = type_j;
sdist[jj] = Rij;
// cutoff
DataT fc_ij = 0.5 * cos(PI * Rij / Rca) + 0.5;
DataT fc_ij_grad = -0.5 * (PI / Rca) * sin(PI * Rij / Rca);
sfc[jj] = fc_ij;
sfc_grad[jj] = fc_ij_grad;
}
// grad init
DataT sdix_grad = 0;
DataT sdiy_grad = 0;
DataT sdiz_grad = 0;
for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) {
sdjx_grad[jj] = 0;
sdjy_grad[jj] = 0;
sdjz_grad[jj] = 0;
}
short2 tile = make_short2(laneIdx % TILEX, laneIdx / TILEX);
const DataT tc = 0.95; // theta constant factor
// must sync if threads_per_catom != 32 (warp size) to make sure shared data is ready
// __syncthreads
for (int jj = 0; jj < jnum; jj++) {
const DataT Rij = sdist[jj];
SpeciesT type_j = stype[jj];
DataT fc_ij = sfc[jj];
DataT grad_fc_ij = sfc_grad[jj];
for (int kk_start = jj + 1; kk_start < jnum; kk_start += threads_per_catom) {
int kk = kk_start + laneIdx;
DataT theta = 0;
DataT grad_theta_vij_x = 0;
DataT grad_theta_vij_y = 0;
DataT grad_theta_vij_z = 0;
DataT grad_theta_vik_x = 0;
DataT grad_theta_vik_y = 0;
DataT grad_theta_vik_z = 0;
if (kk < jnum) {
const DataT Rik = sdist[kk];
DataT vij_vik_dot = sdx[jj] * sdx[kk] + sdy[jj] * sdy[kk] + sdz[jj] * sdz[kk];
theta = acos(tc * vij_vik_dot / (Rij * Rik));
// grad
DataT vij_factor =
tc / (Rij * Rij * Rij * sqrt(-tc * tc * vij_vik_dot * vij_vik_dot / (Rij * Rij) + Rik * Rik));
DataT vik_factor = tc /
(Rik * Rik * Rik *
sqrt(-tc * tc * vij_vik_dot * vij_vik_dot / (Rik * Rik) + Rij * Rij)); // tricky 80ms improved
grad_theta_vij_x = vij_factor * (sdx[jj] * vij_vik_dot - sdx[kk] * Rij * Rij);
grad_theta_vij_y = vij_factor * (sdy[jj] * vij_vik_dot - sdy[kk] * Rij * Rij);
grad_theta_vij_z = vij_factor * (sdz[jj] * vij_vik_dot - sdz[kk] * Rij * Rij);
grad_theta_vik_x = vik_factor * (sdx[kk] * vij_vik_dot - sdx[jj] * Rik * Rik);
grad_theta_vik_y = vik_factor * (sdy[kk] * vij_vik_dot - sdy[jj] * Rik * Rik);
grad_theta_vik_z = vik_factor * (sdz[kk] * vij_vik_dot - sdz[jj] * Rik * Rik);
}
for (int srcLane = 0; srcLane < C10_WARP_SIZE && (kk_start + srcLane) < jnum; ++srcLane) {
int kk = kk_start + srcLane;
DataT theta_ijk = __shfl_sync(0xFFFFFFFF, theta, srcLane);
// TODO necessary?
DataT grad_theta_vij_x_ = __shfl_sync(0xFFFFFFFF, grad_theta_vij_x, srcLane);
DataT grad_theta_vij_y_ = __shfl_sync(0xFFFFFFFF, grad_theta_vij_y, srcLane);
DataT grad_theta_vij_z_ = __shfl_sync(0xFFFFFFFF, grad_theta_vij_z, srcLane);
DataT grad_theta_vik_x_ = __shfl_sync(0xFFFFFFFF, grad_theta_vik_x, srcLane);
DataT grad_theta_vik_y_ = __shfl_sync(0xFFFFFFFF, grad_theta_vik_y, srcLane);
DataT grad_theta_vik_z_ = __shfl_sync(0xFFFFFFFF, grad_theta_vik_z, srcLane);
const DataT Rik = sdist[kk];
SpeciesT type_k = stype[kk];
DataT fc_ik = sfc[kk];
DataT grad_fc_ik = sfc_grad[kk];
DataT Rijk = (Rij + Rik) / 2;
DataT fc_ijk = fc_ij * fc_ik;
IndexT subaev_offset = angular_sublength * csubaev_offsets(type_j, type_k, num_species);
float3 grad_vij = make_float3(0.f, 0.f, 0.f);
float3 grad_vik = make_float3(0.f, 0.f, 0.f);
for (int itheta = tile.x; itheta < nShfZ; itheta += TILEX) {
DataT ShfZ = ShfZ_t[itheta];
DataT factor1 = pow((1 + cos(theta_ijk - ShfZ)) / 2, Zeta);
DataT grad_factor1_theta = 1.0 / 2.0 * Zeta * pow((1 + cos(ShfZ - theta_ijk)) / 2, Zeta - 1) *
sin(ShfZ - theta_ijk); // tricky 100ms improved
for (int ishfr = tile.y; ishfr < nShfA; ishfr += TILEY) {
DataT ShfA = ShfA_t[ishfr];
DataT factor2 = exp(-EtaA * (Rijk - ShfA) * (Rijk - ShfA));
DataT grad_factor2_dist = -EtaA * (Rijk - ShfA) * factor2;
DataT grad_vij_x = 2 *
(grad_factor1_theta * grad_theta_vij_x_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdx[jj] / Rij * fc_ijk +
factor1 * factor2 * fc_ik * grad_fc_ij * sdx[jj] / Rij);
DataT grad_vij_y = 2 *
(grad_factor1_theta * grad_theta_vij_y_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdy[jj] / Rij * fc_ijk +
factor1 * factor2 * fc_ik * grad_fc_ij * sdy[jj] / Rij);
DataT grad_vij_z = 2 *
(grad_factor1_theta * grad_theta_vij_z_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdz[jj] / Rij * fc_ijk +
factor1 * factor2 * fc_ik * grad_fc_ij * sdz[jj] / Rij);
DataT grad_vik_x = 2 *
(grad_factor1_theta * grad_theta_vik_x_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdx[kk] / Rik * fc_ijk +
factor1 * factor2 * fc_ij * grad_fc_ik * sdx[kk] / Rik);
DataT grad_vik_y = 2 *
(grad_factor1_theta * grad_theta_vik_y_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdy[kk] / Rik * fc_ijk +
factor1 * factor2 * fc_ij * grad_fc_ik * sdy[kk] / Rik);
DataT grad_vik_z = 2 *
(grad_factor1_theta * grad_theta_vik_z_ * factor2 * fc_ijk +
factor1 * grad_factor2_dist * sdz[kk] / Rik * fc_ijk +
factor1 * factor2 * fc_ij * grad_fc_ik * sdz[kk] / Rik);
if (is_double_backward) {
int atomj_idx = d_Rij[start_idx + jj].j;
int atomk_idx = d_Rij[start_idx + kk].j;
auto& grad_force = grad_output;
auto& grad_grad_aev = grad_input;
grad_vij_x *= (grad_force[mol_idx][atomj_idx][0] - grad_force[mol_idx][i][0]);
grad_vij_y *= (grad_force[mol_idx][atomj_idx][1] - grad_force[mol_idx][i][1]);
grad_vij_z *= (grad_force[mol_idx][atomj_idx][2] - grad_force[mol_idx][i][2]);
grad_vik_x *= (grad_force[mol_idx][atomk_idx][0] - grad_force[mol_idx][i][0]);
grad_vik_y *= (grad_force[mol_idx][atomk_idx][1] - grad_force[mol_idx][i][1]);
grad_vik_z *= (grad_force[mol_idx][atomk_idx][2] - grad_force[mol_idx][i][2]);
atomicAdd(
&grad_grad_aev[mol_idx][i][radial_length + subaev_offset + ishfr * nShfZ + itheta],
grad_vij_x + grad_vij_y + grad_vij_z + grad_vik_x + grad_vik_y + grad_vik_z);
} else {
DataT grad_output_item = grad_output[mol_idx][i][radial_length + subaev_offset + ishfr * nShfZ + itheta];
grad_vij_x *= grad_output_item;
grad_vij_y *= grad_output_item;
grad_vij_z *= grad_output_item;
grad_vik_x *= grad_output_item;
grad_vik_y *= grad_output_item;
grad_vik_z *= grad_output_item;
grad_vij.x += grad_vij_x;
grad_vij.y += grad_vij_y;
grad_vij.z += grad_vij_z;
grad_vik.x += grad_vik_x;
grad_vik.y += grad_vik_y;
grad_vik.z += grad_vik_z;
}
}
}
if (!is_double_backward) {
sdix_grad += (-grad_vij.x - grad_vik.x);
sdiy_grad += (-grad_vij.y - grad_vik.y);
sdiz_grad += (-grad_vij.z - grad_vik.z);
for (int offset = 16; offset > 0; offset /= 2) {
grad_vij.x += __shfl_down_sync(0xFFFFFFFF, grad_vij.x, offset);
grad_vij.y += __shfl_down_sync(0xFFFFFFFF, grad_vij.y, offset);
grad_vij.z += __shfl_down_sync(0xFFFFFFFF, grad_vij.z, offset);
grad_vik.x += __shfl_down_sync(0xFFFFFFFF, grad_vik.x, offset);
grad_vik.y += __shfl_down_sync(0xFFFFFFFF, grad_vik.y, offset);
grad_vik.z += __shfl_down_sync(0xFFFFFFFF, grad_vik.z, offset);
}
if (laneIdx == 0) {
sdjx_grad[jj] += grad_vij.x;
sdjy_grad[jj] += grad_vij.y;
sdjz_grad[jj] += grad_vij.z;
sdjx_grad[kk] += grad_vik.x;
sdjy_grad[kk] += grad_vik.y;
sdjz_grad[kk] += grad_vik.z;
}
}
}
}
}
if (!is_double_backward) {
auto& grad_coord = grad_input;
int atomi_idx = i;
atomicAdd(&grad_coord[mol_idx][atomi_idx][0], sdix_grad);
atomicAdd(&grad_coord[mol_idx][atomi_idx][1], sdiy_grad);
atomicAdd(&grad_coord[mol_idx][atomi_idx][2], sdiz_grad);
for (int jj = laneIdx; jj < jnum; jj += threads_per_catom) {
int atomj_idx = d_Rij[start_idx + jj].j;
atomicAdd(&grad_coord[mol_idx][atomj_idx][0], sdjx_grad[jj]);
atomicAdd(&grad_coord[mol_idx][atomj_idx][1], sdjy_grad[jj]);
atomicAdd(&grad_coord[mol_idx][atomj_idx][2], sdjz_grad[jj]);
}
}
}
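// In the backward branch above, the per-lane dvij/dvik contributions are first
// reduced across the warp with __shfl_down_sync, lane 0 accumulates them into the
// shared per-neighbor buffers, and only after the neighbor loop are the shared
// sums flushed to global memory with atomicAdd, keeping most of the accumulation
// off the atomics.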
template <typename SpeciesT, typename DataT, int THREADS_PER_RIJ>
__global__ void cuRadialAEVs(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfR_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaR_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits> aev_t,
PairDist<DataT>* d_Rij,
float Rcr,
int radial_length,
int radial_sublength,
int nRadialRij) {
int gidx = blockIdx.x * blockDim.x + threadIdx.x;
int idx = gidx / THREADS_PER_RIJ;
int nShfR = ShfR_t.size(0);
DataT EtaR = EtaR_t[0];
if (idx >= nRadialRij)
return;
int laneIdx = threadIdx.x % THREADS_PER_RIJ;
PairDist<DataT> d = d_Rij[idx];
DataT Rij = d.Rij;
int mol_idx = d.midx;
int i = d.i;
int j = d.j;
SpeciesT type_j = species_t[mol_idx][j];
DataT fc = 0.5 * cos(PI * Rij / Rcr) + 0.5;
for (int ishfr = laneIdx; ishfr < nShfR; ishfr += THREADS_PER_RIJ) {
DataT ShfR = ShfR_t[ishfr];
DataT GmR = 0.25 * exp(-EtaR * (Rij - ShfR) * (Rij - ShfR)) * fc;
atomicAdd(&aev_t[mol_idx][i][type_j * radial_sublength + ishfr], GmR);
}
}
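// Recap of the radial term accumulated above:
// G = 0.25 * exp(-EtaR * (Rij - ShfR)^2) * fc(Rij), with the cosine cutoff
// fc(Rij) = 0.5 * cos(pi * Rij / Rcr) + 0.5, written into the sub-AEV slot
// selected by the neighbor species type_j.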
// every <THREADS_PER_RIJ> threads take care of 1 RIJ, and iterate <nShfR / THREADS_PER_RIJ> times
template <bool is_double_backward, typename SpeciesT, typename DataT, int THREADS_PER_RIJ>
__global__ void cuRadialAEVs_backward_or_doublebackward(
torch::PackedTensorAccessor32<SpeciesT, 2, torch::RestrictPtrTraits> species_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> ShfR_t,
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits> EtaR_t,
torch::PackedTensorAccessor32<DataT, 3, torch::RestrictPtrTraits>
grad_aev, // daev for backward, ddaev for double backward
torch::PackedTensorAccessor32<DataT, 1, torch::RestrictPtrTraits>
grad_dist, // ddist for backward, dddist for double backward
const PairDist<DataT>* d_Rij,
float Rcr,
int radial_length,
int radial_sublength,
int nRadialRij) {
int gidx = blockIdx.x * blockDim.x + threadIdx.x;
int idx = gidx / THREADS_PER_RIJ;
int nShfR = ShfR_t.size(0);
DataT EtaR = EtaR_t[0];
if (idx >= nRadialRij)
return;
int laneIdx = threadIdx.x % THREADS_PER_RIJ;
PairDist<DataT> d = d_Rij[idx];
DataT Rij = d.Rij;
int mol_idx = d.midx;
int i = d.i;
int j = d.j;
SpeciesT type_j = species_t[mol_idx][j];
DataT fc = 0.5 * cos(PI * Rij / Rcr) + 0.5;
DataT fc_grad = -0.5 * (PI / Rcr) * sin(PI * Rij / Rcr);
DataT upstream_grad;
if (is_double_backward) {
upstream_grad = grad_dist[idx];
}
for (int ishfr = laneIdx; ishfr < nShfR; ishfr += THREADS_PER_RIJ) {
DataT ShfR = ShfR_t[ishfr];
DataT GmR = 0.25 * exp(-EtaR * (Rij - ShfR) * (Rij - ShfR));
DataT GmR_grad = -EtaR * (-2 * ShfR + 2 * Rij) * GmR;
DataT jacobian = GmR_grad * fc + GmR * fc_grad;
if (is_double_backward) {
atomicAdd(&grad_aev[mol_idx][i][type_j * radial_sublength + ishfr], upstream_grad * jacobian);
} else {
upstream_grad = grad_aev[mol_idx][i][type_j * radial_sublength + ishfr];
atomicAdd(&grad_dist[idx], upstream_grad * jacobian);
}
}
}
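// The jacobian above is the product rule d(G * fc)/dRij = dG/dRij * fc + G * dfc/dRij;
// in backward mode it is contracted with the incoming dAEV and accumulated into
// grad_dist, in double-backward mode it is multiplied by the incoming dddist and
// scattered back into ddaev.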
// NOTE: assumes size of EtaA_t = Zeta_t = EtaR_t = 1
Result cuaev_forward(const Tensor& coordinates_t, const Tensor& species_t, const AEVScalarParams& aev_params) {
TORCH_CHECK(
(species_t.dtype() == torch::kInt32) && (coordinates_t.dtype() == torch::kFloat32), "Unsupported input type");
TORCH_CHECK(
aev_params.EtaR_t.size(0) == 1 && aev_params.EtaA_t.size(0) == 1 && aev_params.Zeta_t.size(0) == 1,
"cuda extension is currently not supported for the specified "
"configuration");
TORCH_CHECK(
coordinates_t.device() == species_t.device() && coordinates_t.device() == aev_params.EtaR_t.device() &&
coordinates_t.device() == aev_params.EtaA_t.device(),
"coordinates, species, and aev_params should be on the same device");
float Rcr = aev_params.Rcr;
float Rca = aev_params.Rca;
const int n_molecules = species_t.size(0);
const int max_natoms_per_mol = species_t.size(1);
int aev_length = aev_params.radial_length + aev_params.angular_length;
auto aev_t = torch::zeros({n_molecules, max_natoms_per_mol, aev_length}, coordinates_t.options());
if (species_t.numel() == 0) {
return {
aev_t, Tensor(), Tensor(), Tensor(), 0, 0, 0, Tensor(), Tensor(), Tensor(), 0, 0, 0, coordinates_t, species_t};
}
at::cuda::CUDAGuard device_guard(coordinates_t.device().index());
at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream();
auto& allocator = *c10::cuda::CUDACachingAllocator::get();
// buffer to store all the pairwise distances (Rij)
auto total_natom_pairs = n_molecules * max_natoms_per_mol * max_natoms_per_mol;
auto d_options = torch::dtype(torch::kUInt8).device(coordinates_t.device());
float inf = std::numeric_limits<float>::infinity();
Tensor tensor_Rij =
torch::full(sizeof(PairDist<float>) / sizeof(float) * total_natom_pairs, inf, d_options.dtype(torch::kFloat32));
PairDist<float>* d_Rij = (PairDist<float>*)tensor_Rij.data_ptr();
// buffer to store all the pairwise distances that are needed for Radial AEV
// computation
Tensor tensor_radialRij = torch::empty(sizeof(PairDist<float>) * total_natom_pairs, d_options);
PairDist<float>* d_radialRij = (PairDist<float>*)tensor_radialRij.data_ptr();
auto buffer_count = allocator.allocate(sizeof(int));
int* d_count_out = (int*)buffer_count.get();
const int block_size = 64;
if (n_molecules == 1) {
int tileWidth = 32;
int tilesPerRow = (max_natoms_per_mol + tileWidth - 1) / tileWidth;
dim3 block(tileWidth, tileWidth, 1);
dim3 grid(tilesPerRow, tilesPerRow, 1);
pairwiseDistanceSingleMolecule<<<grid, block, 0, stream>>>(
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_Rij,
max_natoms_per_mol);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
dim3 block(8, 8, 1);
// Compute pairwise distances (Rij) for all atom pairs in a molecule
// maximum 4096 atoms, which needs 49152 bytes (48 KB) of shared memory
// TODO: the kernel is not optimized for batched huge molecules (max_natoms_per_mol > 1000)
pairwiseDistance<<<n_molecules, block, sizeof(float) * max_natoms_per_mol * 3, stream>>>(
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_Rij,
max_natoms_per_mol);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
// Extract the Rij values needed for the radial AEV computation, i.e. all Rij <= Rcr
int nRadialRij = cubDeviceSelect(
d_Rij,
d_radialRij,
total_natom_pairs,
d_count_out,
[=] __device__(const PairDist<float> d) { return d.Rij <= Rcr; },
stream);
int nblocks = (nRadialRij * 8 + block_size - 1) / block_size;
cuRadialAEVs<int, float, 8><<<nblocks, block_size, 0, stream>>>(
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
aev_params.ShfR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_radialRij,
aev_params.Rcr,
aev_params.radial_length,
aev_params.radial_sublength,
nRadialRij);
C10_CUDA_KERNEL_LAUNCH_CHECK();
// reuse buffer allocated for all Rij
// d_angularRij will store all the Rij required in Angular AEV computation
Tensor tensor_angularRij = torch::empty(sizeof(PairDist<float>) * nRadialRij, d_options);
PairDist<float>* d_angularRij = (PairDist<float>*)tensor_angularRij.data_ptr();
// Extract the Rij values needed for the angular AEV computation, i.e. all
// Rij <= Rca
int nAngularRij = cubDeviceSelect(
d_radialRij,
d_angularRij,
nRadialRij,
d_count_out,
[=] __device__(const PairDist<float> d) { return d.Rij <= Rca; },
stream);
Tensor tensor_centralAtom = torch::empty(sizeof(PairDist<float>) * nAngularRij, d_options);
PairDist<float>* d_centralAtom = (PairDist<float>*)tensor_centralAtom.data_ptr();
Tensor tensor_numPairsPerCenterAtom = torch::empty(sizeof(int) * nAngularRij, d_options);
int* d_numPairsPerCenterAtom = (int*)tensor_numPairsPerCenterAtom.data_ptr();
// group by center atom
int ncenter_atoms = cubEncode(d_angularRij, d_centralAtom, d_numPairsPerCenterAtom, nAngularRij, d_count_out, stream);
Tensor tensor_centerAtomStartIdx = torch::empty(sizeof(int) * ncenter_atoms, d_options);
int* d_centerAtomStartIdx = (int*)tensor_centerAtomStartIdx.data_ptr();
cubScan(d_numPairsPerCenterAtom, d_centerAtomStartIdx, ncenter_atoms, stream);
{
const int nthreads_per_catom = 32;
const int nblocks_angAEV = (ncenter_atoms * nthreads_per_catom + block_size - 1) / block_size;
auto smem_size = [&aev_params](int max_nbrs, int ncatom_per_tpb) {
int sm_aev = sizeof(float) * align<4>(aev_params.angular_length); // (angular_length / 4 + 1) * 4
int sxyz = sizeof(float) * max_nbrs * 3;
int sRij = sizeof(float) * max_nbrs;
int sfc = sizeof(float) * max_nbrs;
int sj = sizeof(int) * max_nbrs;
return (sm_aev + sxyz + sRij + sfc + sj) * ncatom_per_tpb;
};
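// Rough sizing example (hypothetical values only): with angular_length = 32 and
// max_nbrs = 8, sm_aev = 4 * align<4>(32) = 144 bytes and the neighbor scratch is
// 96 + 32 + 32 + 32 bytes, i.e. 336 bytes per central atom, or 672 bytes for the
// two central atoms handled by a 64-thread block.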
int maxNbrsPerCenterAtom = cubMax(d_numPairsPerCenterAtom, ncenter_atoms, d_count_out, stream);
int maxnbrs_per_atom_aligned = align<4>(maxNbrsPerCenterAtom);
int smem_size_aligned = smem_size(maxnbrs_per_atom_aligned, block_size / nthreads_per_catom);
int angular_length_aligned = align<4>(aev_params.angular_length);
cuAngularAEVs<<<nblocks_angAEV, block_size, smem_size_aligned, stream>>>(
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
aev_params.ShfA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.ShfZ_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.Zeta_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_angularRij,
d_centralAtom,
d_numPairsPerCenterAtom,
d_centerAtomStartIdx,
aev_params.Rca,
aev_params.angular_length,
aev_params.angular_sublength,
aev_params.radial_length,
aev_params.num_species,
maxnbrs_per_atom_aligned,
angular_length_aligned,
ncenter_atoms);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return {
aev_t,
tensor_Rij,
tensor_radialRij,
tensor_angularRij,
total_natom_pairs,
nRadialRij,
nAngularRij,
tensor_centralAtom,
tensor_numPairsPerCenterAtom,
tensor_centerAtomStartIdx,
maxnbrs_per_atom_aligned,
angular_length_aligned,
ncenter_atoms,
coordinates_t,
species_t};
}
}
Tensor cuaev_backward(const Tensor& grad_output, const AEVScalarParams& aev_params, const Result& result) {
using namespace torch::indexing;
Tensor coordinates_t = result.coordinates_t;
Tensor species_t = result.species_t;
const int n_molecules = coordinates_t.size(0);
const int max_natoms_per_mol = coordinates_t.size(1);
at::cuda::CUDAGuard device_guard(coordinates_t.device().index());
at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream();
auto grad_coord = torch::zeros(coordinates_t.sizes(), coordinates_t.options().requires_grad(false)); // [2, 5, 3]
PairDist<float>* d_Rij = (PairDist<float>*)result.tensor_Rij.data_ptr();
PairDist<float>* d_radialRij = (PairDist<float>*)result.tensor_radialRij.data_ptr();
PairDist<float>* d_angularRij = (PairDist<float>*)result.tensor_angularRij.data_ptr();
PairDist<float>* d_centralAtom = (PairDist<float>*)result.tensor_centralAtom.data_ptr();
int* d_numPairsPerCenterAtom = (int*)result.tensor_numPairsPerCenterAtom.data_ptr();
int* d_centerAtomStartIdx = (int*)result.tensor_centerAtomStartIdx.data_ptr();
Tensor grad_radial_dist = torch::zeros(result.nRadialRij, coordinates_t.options().requires_grad(false));
int block_size = 64;
int nblocks = (result.nRadialRij * 8 + block_size - 1) / block_size;
cuRadialAEVs_backward_or_doublebackward<false, int, float, 8><<<nblocks, block_size, 0, stream>>>(
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
aev_params.ShfR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_output.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_radial_dist.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
d_radialRij,
aev_params.Rcr,
aev_params.radial_length,
aev_params.radial_sublength,
result.nRadialRij);
C10_CUDA_KERNEL_LAUNCH_CHECK();
// For best results, block_size should match the average molecule size (no padding) to reduce atomicAdd contention
nblocks = (result.nRadialRij + block_size - 1) / block_size;
pairwiseDistance_backward_or_doublebackward<false><<<nblocks, block_size, 0, stream>>>(
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_radial_dist.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_coord.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_radialRij,
result.nRadialRij);
C10_CUDA_KERNEL_LAUNCH_CHECK();
auto smem_size = [&aev_params](int max_nbrs, int ncatom_per_tpb) {
int sxyz = sizeof(float) * max_nbrs * 3;
int sj_xyz_grad = sizeof(float) * max_nbrs * 3;
int sRij = sizeof(float) * max_nbrs;
int sfc = sizeof(float) * max_nbrs;
int sfc_grad = sizeof(float) * max_nbrs;
int sj = sizeof(int) * max_nbrs;
return (sxyz + sj_xyz_grad + sRij + sfc + sfc_grad + sj) * ncatom_per_tpb;
};
block_size = 32;
const int nthreads_per_catom = 32;
const int nblocks_angAEV = (result.ncenter_atoms * nthreads_per_catom + block_size - 1) / block_size;
int smem_size_aligned = smem_size(result.maxnbrs_per_atom_aligned, block_size / nthreads_per_catom);
Tensor grad_angular_coord = torch::zeros({result.nAngularRij, 3}, coordinates_t.options().requires_grad(false));
cuAngularAEVs_backward_or_doublebackward<false><<<nblocks_angAEV, block_size, smem_size_aligned, stream>>>(
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
aev_params.ShfA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.ShfZ_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.Zeta_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_output.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_coord.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_angularRij,
d_centralAtom,
d_numPairsPerCenterAtom,
d_centerAtomStartIdx,
aev_params.Rca,
aev_params.angular_length,
aev_params.angular_sublength,
aev_params.radial_length,
aev_params.num_species,
result.maxnbrs_per_atom_aligned,
result.angular_length_aligned,
result.ncenter_atoms);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return grad_coord;
}
Tensor cuaev_double_backward(const Tensor& grad_force, const AEVScalarParams& aev_params, const Result& result) {
using namespace torch::indexing;
Tensor coordinates_t = result.coordinates_t;
Tensor species_t = result.species_t;
const int n_molecules = coordinates_t.size(0);
const int max_natoms_per_mol = coordinates_t.size(1);
at::cuda::CUDAGuard device_guard(coordinates_t.device().index());
at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream();
int aev_length = aev_params.radial_length + aev_params.angular_length;
auto grad_grad_aev = torch::zeros(
{coordinates_t.size(0), coordinates_t.size(1), aev_length},
coordinates_t.options().requires_grad(false)); // [2, 5, 384]
PairDist<float>* d_Rij = (PairDist<float>*)result.tensor_Rij.data_ptr();
PairDist<float>* d_radialRij = (PairDist<float>*)result.tensor_radialRij.data_ptr();
PairDist<float>* d_angularRij = (PairDist<float>*)result.tensor_angularRij.data_ptr();
PairDist<float>* d_centralAtom = (PairDist<float>*)result.tensor_centralAtom.data_ptr();
int* d_numPairsPerCenterAtom = (int*)result.tensor_numPairsPerCenterAtom.data_ptr();
int* d_centerAtomStartIdx = (int*)result.tensor_centerAtomStartIdx.data_ptr();
auto grad_force_coord_Rij = torch::zeros({result.nRadialRij}, coordinates_t.options().requires_grad(false));
int block_size = 64;
int nblocks = (result.nRadialRij + block_size - 1) / block_size;
pairwiseDistance_backward_or_doublebackward<true><<<nblocks, block_size, 0, stream>>>(
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_force_coord_Rij.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_force.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_radialRij,
result.nRadialRij);
C10_CUDA_KERNEL_LAUNCH_CHECK();
nblocks = (result.nRadialRij * 8 + block_size - 1) / block_size;
cuRadialAEVs_backward_or_doublebackward<true, int, float, 8><<<nblocks, block_size, 0, stream>>>(
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
aev_params.ShfR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaR_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_grad_aev.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_force_coord_Rij.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
d_radialRij,
aev_params.Rcr,
aev_params.radial_length,
aev_params.radial_sublength,
result.nRadialRij);
C10_CUDA_KERNEL_LAUNCH_CHECK();
auto smem_size = [&aev_params](int max_nbrs, int ncatom_per_tpb) {
int sxyz = sizeof(float) * max_nbrs * 3;
int sj_xyz_grad = sizeof(float) * max_nbrs * 3;
int sRij = sizeof(float) * max_nbrs;
int sfc = sizeof(float) * max_nbrs;
int sfc_grad = sizeof(float) * max_nbrs;
int sj = sizeof(int) * max_nbrs;
return (sxyz + sj_xyz_grad + sRij + sfc + sfc_grad + sj) * ncatom_per_tpb;
};
block_size = 32;
const int nthreads_per_catom = 32;
const int nblocks_angAEV = (result.ncenter_atoms * nthreads_per_catom + block_size - 1) / block_size;
int smem_size_aligned = smem_size(result.maxnbrs_per_atom_aligned, block_size / nthreads_per_catom);
cuAngularAEVs_backward_or_doublebackward<true><<<nblocks_angAEV, block_size, smem_size_aligned, stream>>>(
species_t.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
coordinates_t.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
aev_params.ShfA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.ShfZ_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.EtaA_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
aev_params.Zeta_t.packed_accessor32<float, 1, torch::RestrictPtrTraits>(),
grad_force.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
grad_grad_aev.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
d_angularRij,
d_centralAtom,
d_numPairsPerCenterAtom,
d_centerAtomStartIdx,
aev_params.Rca,
aev_params.angular_length,
aev_params.angular_sublength,
aev_params.radial_length,
aev_params.num_species,
result.maxnbrs_per_atom_aligned,
result.angular_length_aligned,
result.ncenter_atoms);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return grad_grad_aev;
}
|
88adebec1ccd20016dc5ca05e48272ff172ac234.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void Naive_Hist(int* d_result, int* d_hist, int n_vertices) {
//each block compares its row against all other rows; threads stride over the candidate rows (row2)
int row = blockIdx.x;
int row2 = threadIdx.x;
bool equal;
//shared count for the whole block (same vertex)
__shared__ int count;
//one thread sets count to zero, then all threads synchronize.
if(row2 == 0)
count = 0;
__syncthreads();
//checks equality to other vertices
if(row < n_vertices && row2 < n_vertices)
for(int i = row2; i < n_vertices; i += blockDim.x) {
//checks whether the two vertices' lcm rows are identical
equal = false;
for(int j = 0; j < n_vertices; j++) {
if(d_result[row*n_vertices +j] == d_result[i*n_vertices + j])
equal = true;
else {
equal = false;
break;
}
}
//adds to count if vertices are equal
if(equal)
atomicAdd(&count, 1);
}
//synchronize so count is final, then thread 0 increments hist[count]
__syncthreads();
if(row < n_vertices && row2 == 0 && count > 0)
atomicAdd(&d_hist[count], 1);
} | 88adebec1ccd20016dc5ca05e48272ff172ac234.cu | #include "includes.h"
__global__ void Naive_Hist(int* d_result, int* d_hist, int n_vertices) {
//each block compares its row against all other rows; threads stride over the candidate rows (row2)
int row = blockIdx.x;
int row2 = threadIdx.x;
bool equal;
//shared count for the whole block (same vertex)
__shared__ int count;
//one thread sets count to zero, then all threads synchronize.
if(row2 == 0)
count = 0;
__syncthreads();
//checks equality to other vertices
if(row < n_vertices && row2 < n_vertices)
for(int i = row2; i < n_vertices; i += blockDim.x) {
//checks whether the two vertices' lcm rows are identical
equal = false;
for(int j = 0; j < n_vertices; j++) {
if(d_result[row*n_vertices +j] == d_result[i*n_vertices + j])
equal = true;
else {
equal = false;
break;
}
}
//adds to count if vertices are equal
if(equal)
atomicAdd(&count, 1);
}
//synchronize so count is final, then thread 0 increments hist[count]
__syncthreads();
if(row < n_vertices && row2 == 0 && count > 0)
atomicAdd(&d_hist[count], 1);
} |
a9777e2626b742a6d97d9015f7bb6189d1c0e2d8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "cudautils.h"
#include "tensor.h"
#include "matmul.cuh"
#include "sum.cuh"
const int BLOCK_SIZE = 32;
const int BLOCK_SIZE_SUM = 1024;
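// The kernel below computes the affine map C = A * X + b: A is m x n, X is n x k,
// b is a length-k bias broadcast across rows, and each thread produces one output
// element C[row][col].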
__global__ void linear(float *a, float *x, float *b, float *c, int m, int n, int k)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0;
if (col < k && row < m)
{
for (int i = 0; i < n; i++)
{
sum += a[row * n + i] * x[i * k + col];
}
c[row * k + col] = sum + b[col];
}
}
extern "C" {
int gpu_linear_forward(const TENSOR *target, const TENSOR *a, const TENSOR* x, TENSOR *b)
{
float* gpu_a;
size_t a_size = a->mat_shape->size * sizeof(float);
checkCudaErr(hipMalloc((void**)&gpu_a, a_size));
checkCudaErr(hipMemcpy(gpu_a, &a->data[0], a_size, hipMemcpyHostToDevice));
float* gpu_b;
size_t b_size = b->mat_shape->size * sizeof(float);
checkCudaErr(hipMalloc((void**)&gpu_b, b_size));
checkCudaErr(hipMemcpy(gpu_b, &b->data[0], b_size, hipMemcpyHostToDevice));
float* gpu_x;
size_t x_size = x->mat_shape->size * sizeof(float);
checkCudaErr(hipMalloc((void**)&gpu_x, x_size));
checkCudaErr(hipMemcpy(gpu_x, &x->data[0], x_size, hipMemcpyHostToDevice));
float* gpu_target;
size_t target_size = target->mat_shape->size * sizeof(float);
checkCudaErr(hipMalloc(&gpu_target, target_size));
dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE);
dim3 gridSize((x->mat_shape->y + BLOCK_SIZE - 1) / BLOCK_SIZE, (a->mat_shape->x + BLOCK_SIZE - 1) / BLOCK_SIZE);
hipLaunchKernelGGL(( linear), dim3(gridSize), dim3(blockSize), 0, 0, gpu_a, gpu_x, gpu_b, gpu_target, a->mat_shape->x, a->mat_shape->y, x->mat_shape->y);
checkCudaKernelErr("linear", blockSize, gridSize);
checkCudaErr(hipMemcpy(&target->data[0], gpu_target, target_size, hipMemcpyDeviceToHost));
hipFree(gpu_a);
hipFree(gpu_x);
hipFree(gpu_b);
hipFree(gpu_target);
return 0;
}
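// Note on the forward launch above: the 2D grid of 32 x 32 blocks is sized
// ceil(k / 32) x ceil(m / 32), with m = a->mat_shape->x, n = a->mat_shape->y and
// k = x->mat_shape->y, so each element of the m x k output is produced by exactly
// one thread.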
int gpu_linear_backward(const TENSOR *tensor, const TENSOR *a, const TENSOR *x, TENSOR *b)
{
float* gpu_tensor_grad;
size_t gpu_tensor_grad_size = tensor->grad_shape->size * sizeof(float);
checkCudaErr(hipMalloc((void**)&gpu_tensor_grad, gpu_tensor_grad_size));
checkCudaErr(hipMemcpy(gpu_tensor_grad, &tensor->grad[0], gpu_tensor_grad_size, hipMemcpyHostToDevice));
float* gpu_a;
size_t a_size = a->mat_shape->size * sizeof(float);
checkCudaErr(hipMalloc((void**)&gpu_a, a_size));
checkCudaErr(hipMemcpy(gpu_a, &a->data[0], a_size, hipMemcpyHostToDevice));
float* gpu_a_grad;
checkCudaErr(hipMalloc(&gpu_a_grad, a_size));
float* gpu_b;
size_t b_size = b->mat_shape->size * sizeof(float);
checkCudaErr(hipMalloc((void**)&gpu_b, b_size));
checkCudaErr(hipMemcpy(gpu_b, &b->data[0], b_size, hipMemcpyHostToDevice));
float* gpu_b_grad;
size_t b_grad_size = b->grad_shape->size * sizeof(float);
checkCudaErr(hipMalloc(&gpu_b_grad, b_grad_size));
float* gpu_x;
size_t x_size = x->mat_shape->size * sizeof(float);
checkCudaErr(hipMalloc((void**)&gpu_x, x_size));
checkCudaErr(hipMemcpy(gpu_x, &x->data[0], x_size, hipMemcpyHostToDevice));
float* gpu_x_grad;
checkCudaErr(hipMalloc(&gpu_x_grad, x_size));
hipStream_t streamA, streamB;
hipStreamCreate(&streamA);
hipStreamCreate(&streamB);
// A GRAD
dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE);
dim3 gridSize((x->mat_shape->x + BLOCK_SIZE - 1) / BLOCK_SIZE, (tensor->grad_shape->x + BLOCK_SIZE - 1) / BLOCK_SIZE);
hipLaunchKernelGGL(( matmul_a_grad), dim3(gridSize), dim3(blockSize), 0, streamA, gpu_tensor_grad, gpu_x, gpu_a_grad, tensor->grad_shape->x, tensor->grad_shape->y, x->mat_shape->x);
checkCudaKernelErr("matmul_a_grad", blockSize, gridSize);
checkCudaErr(hipMemcpy(&a->grad[0], gpu_a_grad, a_size, hipMemcpyDeviceToHost));
// X GRAD
gridSize.x = (a->mat_shape->y + BLOCK_SIZE - 1) / BLOCK_SIZE;
gridSize.y = (tensor->grad_shape->y + BLOCK_SIZE - 1) / BLOCK_SIZE;
hipLaunchKernelGGL(( matmul_b_grad), dim3(gridSize), dim3(blockSize), 0, streamB, gpu_tensor_grad, gpu_a, gpu_x_grad, tensor->grad_shape->y, tensor->grad_shape->x, a->mat_shape->y);
checkCudaKernelErr("matmul_b_grad", blockSize, gridSize);
checkCudaErr(hipMemcpy(&x->grad[0], gpu_x_grad, x_size, hipMemcpyDeviceToHost));
// B GRAD
blockSize.x = BLOCK_SIZE_SUM;
blockSize.y = 1;
gridSize.x = (tensor->grad_shape->y + BLOCK_SIZE_SUM - 1) / BLOCK_SIZE_SUM;
gridSize.y = 1;
hipLaunchKernelGGL(( sum0), dim3(gridSize), dim3(blockSize), 0, 0, gpu_tensor_grad, gpu_b_grad, tensor->grad_shape->y, tensor->grad_shape->x);
checkCudaKernelErr("sum0", blockSize, gridSize);
checkCudaErr(hipMemcpy(&b->grad[0], gpu_b_grad, b_grad_size, hipMemcpyDeviceToHost));
hipStreamDestroy(streamA);
hipStreamDestroy(streamB);
hipFree(gpu_tensor_grad);
hipFree(gpu_a);
hipFree(gpu_x);
hipFree(gpu_b);
hipFree(gpu_a_grad);
hipFree(gpu_x_grad);
hipFree(gpu_b_grad);
return 0;
}
}
| a9777e2626b742a6d97d9015f7bb6189d1c0e2d8.cu | #include <stdio.h>
#include <cuda.h>
#include "cudautils.h"
#include "tensor.h"
#include "matmul.cuh"
#include "sum.cuh"
const int BLOCK_SIZE = 32;
const int BLOCK_SIZE_SUM = 1024;
__global__ void linear(float *a, float *x, float *b, float *c, int m, int n, int k)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0;
if (col < k && row < m)
{
for (int i = 0; i < n; i++)
{
sum += a[row * n + i] * x[i * k + col];
}
c[row * k + col] = sum + b[col];
}
}
extern "C" {
int gpu_linear_forward(const TENSOR *target, const TENSOR *a, const TENSOR* x, TENSOR *b)
{
float* gpu_a;
size_t a_size = a->mat_shape->size * sizeof(float);
checkCudaErr(cudaMalloc((void**)&gpu_a, a_size));
checkCudaErr(cudaMemcpy(gpu_a, &a->data[0], a_size, cudaMemcpyHostToDevice));
float* gpu_b;
size_t b_size = b->mat_shape->size * sizeof(float);
checkCudaErr(cudaMalloc((void**)&gpu_b, b_size));
checkCudaErr(cudaMemcpy(gpu_b, &b->data[0], b_size, cudaMemcpyHostToDevice));
float* gpu_x;
size_t x_size = x->mat_shape->size * sizeof(float);
checkCudaErr(cudaMalloc((void**)&gpu_x, x_size));
checkCudaErr(cudaMemcpy(gpu_x, &x->data[0], x_size, cudaMemcpyHostToDevice));
float* gpu_target;
size_t target_size = target->mat_shape->size * sizeof(float);
checkCudaErr(cudaMalloc(&gpu_target, target_size));
dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE);
dim3 gridSize((x->mat_shape->y + BLOCK_SIZE - 1) / BLOCK_SIZE, (a->mat_shape->x + BLOCK_SIZE - 1) / BLOCK_SIZE);
linear<<<gridSize, blockSize>>>(gpu_a, gpu_x, gpu_b, gpu_target, a->mat_shape->x, a->mat_shape->y, x->mat_shape->y);
checkCudaKernelErr("linear", blockSize, gridSize);
checkCudaErr(cudaMemcpy(&target->data[0], gpu_target, target_size, cudaMemcpyDeviceToHost));
cudaFree(gpu_a);
cudaFree(gpu_x);
cudaFree(gpu_b);
cudaFree(gpu_target);
return 0;
}
int gpu_linear_backward(const TENSOR *tensor, const TENSOR *a, const TENSOR *x, TENSOR *b)
{
float* gpu_tensor_grad;
size_t gpu_tensor_grad_size = tensor->grad_shape->size * sizeof(float);
checkCudaErr(cudaMalloc((void**)&gpu_tensor_grad, gpu_tensor_grad_size));
checkCudaErr(cudaMemcpy(gpu_tensor_grad, &tensor->grad[0], gpu_tensor_grad_size, cudaMemcpyHostToDevice));
float* gpu_a;
size_t a_size = a->mat_shape->size * sizeof(float);
checkCudaErr(cudaMalloc((void**)&gpu_a, a_size));
checkCudaErr(cudaMemcpy(gpu_a, &a->data[0], a_size, cudaMemcpyHostToDevice));
float* gpu_a_grad;
checkCudaErr(cudaMalloc(&gpu_a_grad, a_size));
float* gpu_b;
size_t b_size = b->mat_shape->size * sizeof(float);
checkCudaErr(cudaMalloc((void**)&gpu_b, b_size));
checkCudaErr(cudaMemcpy(gpu_b, &b->data[0], b_size, cudaMemcpyHostToDevice));
float* gpu_b_grad;
size_t b_grad_size = b->grad_shape->size * sizeof(float);
checkCudaErr(cudaMalloc(&gpu_b_grad, b_grad_size));
float* gpu_x;
size_t x_size = x->mat_shape->size * sizeof(float);
checkCudaErr(cudaMalloc((void**)&gpu_x, x_size));
checkCudaErr(cudaMemcpy(gpu_x, &x->data[0], x_size, cudaMemcpyHostToDevice));
float* gpu_x_grad;
checkCudaErr(cudaMalloc(&gpu_x_grad, x_size));
cudaStream_t streamA, streamB;
cudaStreamCreate(&streamA);
cudaStreamCreate(&streamB);
// A GRAD
dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE);
dim3 gridSize((x->mat_shape->x + BLOCK_SIZE - 1) / BLOCK_SIZE, (tensor->grad_shape->x + BLOCK_SIZE - 1) / BLOCK_SIZE);
matmul_a_grad<<<gridSize, blockSize, 0, streamA>>>(gpu_tensor_grad, gpu_x, gpu_a_grad, tensor->grad_shape->x, tensor->grad_shape->y, x->mat_shape->x);
checkCudaKernelErr("matmul_a_grad", blockSize, gridSize);
checkCudaErr(cudaMemcpy(&a->grad[0], gpu_a_grad, a_size, cudaMemcpyDeviceToHost));
// X GRAD
gridSize.x = (a->mat_shape->y + BLOCK_SIZE - 1) / BLOCK_SIZE;
gridSize.y = (tensor->grad_shape->y + BLOCK_SIZE - 1) / BLOCK_SIZE;
matmul_b_grad<<<gridSize, blockSize, 0, streamB>>>(gpu_tensor_grad, gpu_a, gpu_x_grad, tensor->grad_shape->y, tensor->grad_shape->x, a->mat_shape->y);
checkCudaKernelErr("matmul_b_grad", blockSize, gridSize);
checkCudaErr(cudaMemcpy(&x->grad[0], gpu_x_grad, x_size, cudaMemcpyDeviceToHost));
// B GRAD
blockSize.x = BLOCK_SIZE_SUM;
blockSize.y = 1;
gridSize.x = (tensor->grad_shape->y + BLOCK_SIZE_SUM - 1) / BLOCK_SIZE_SUM;
gridSize.y = 1;
sum0<<<gridSize, blockSize>>>(gpu_tensor_grad, gpu_b_grad, tensor->grad_shape->y, tensor->grad_shape->x);
checkCudaKernelErr("sum0", blockSize, gridSize);
checkCudaErr(cudaMemcpy(&b->grad[0], gpu_b_grad, b_grad_size, cudaMemcpyDeviceToHost));
cudaStreamDestroy(streamA);
cudaStreamDestroy(streamB);
cudaFree(gpu_tensor_grad);
cudaFree(gpu_a);
cudaFree(gpu_x);
cudaFree(gpu_b);
cudaFree(gpu_a_grad);
cudaFree(gpu_x_grad);
cudaFree(gpu_b_grad);
return 0;
}
}
|
bc0bf134f29559cdc52bb5e8cde2679fc5234369.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "common.hpp"
#include "math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
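  // A is stored row-major (M x N); viewed as a column-major matrix it is A^T (N x M),
  // so the "no transpose" case must request the transposed op from cuBLAS (and vice
  // versa), which is why the mapping below is flipped relative to caffe_gpu_gemm.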
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
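// caffe_gpu_axpby computes Y = alpha * X + beta * Y by first scaling Y in place
// (scal) and then accumulating alpha * X into it (axpy).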
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
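// The element-wise kernels below use CUDA_KERNEL_LOOP, caffe's grid-stride loop macro
// (defined in common.hpp), roughly:
//   for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < n;
//        index += blockDim.x * gridDim.x)
// so each kernel handles arbitrary n with a fixed launch configuration.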
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
} // namespace caffe
| bc0bf134f29559cdc52bb5e8cde2679fc5234369.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "common.hpp"
#include "math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
} // namespace caffe
|
cd79a6e29b1e43af82e06c7f9791d7c72a796015.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include<opencv2\imgproc.hpp>
#include <iostream>
#include<math.h>
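// Hillis-Steele style inclusive scan: on each pass every element adds the element
// 'space' positions to its left, and 'space' doubles every step, so after log2(N)
// passes each element holds the inclusive prefix sum. The host loop launches one
// kernel per step and ping-pongs between 'input' and 'output' (selected by
// 'direction'), using the kernel-launch boundary as a grid-wide barrier; the final
// pass also writes into 'result'. This costs O(N log N) additions versus O(N) for a
// work-efficient scan.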
__global__ void inclusiveScan(int* input, int * output,int* result,int space,int step, int steps,bool direction)
{
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int tid = x + y*blockDim.x*gridDim.x;
int ret = 0;
if (direction) // odd step: read from output, write back into input
{
if (tid < space)
{
ret = output[tid];
input[tid] = ret;
}
else
{
ret = output[tid]+output[tid - space];
input[tid] = ret;
}
}
else //even step
{
if (tid < space) //copied as they were
{
ret = input[tid];
output[tid] = ret;
}
else // add the element 'space' (= 2^step) positions to the left
{
ret = input[tid] + input[tid - space];
output[tid] = ret;
}
}
if (step == steps - 1)
result[tid] = ret;
}
int main()
{
int N = 1024*1024;
int* d_input;
int* d_output;
int* d_result;
int* h_result = new int[N];
int* h_input = new int[N];
for (int i = 0; i < N; i++)
{
h_input[i] = 1;
}
hipMalloc(&d_input, sizeof(int)*N);
hipMalloc(&d_output, sizeof(int)*N);
hipMalloc(&d_result, sizeof(int)*N);
dim3 threads(1024);
dim3 blocks(N/1024);
hipMemcpy(d_input,h_input,sizeof(int)*N,hipMemcpyHostToDevice);
int steps = static_cast<int>(log2(static_cast<float>(N)));
int space = 1;
for (int step = 0; step < steps; step++)
{
bool direction = (step % 2 != 0);
inclusiveScan << <blocks,threads >> >(d_input,d_output,d_result,space,step,steps,direction);
space = space * 2;
}
// Copy the scan result back to the host
hipMemcpy(h_result, d_result, sizeof(int)*N,hipMemcpyDeviceToHost);
for (int i = 0; i+1 < N; i++)
{
h_input[i+1] = h_input[i] + h_input[i + 1];
}
int correct = 0;
int incorrect = 0;
for (int i = 0; i < N; i++)
std::cout << h_result[i] << "\t" << h_input[i] << std::endl;
//(h_input[i]==h_result[i]) ?correct++ :incorrect++ ;
//std::cout << "Correct: " << correct << "\tincorrect: " << incorrect<<std::endl;
delete[]h_result;
delete[]h_input;
hipFree(d_input);
hipFree(d_output);
hipFree(d_result);
return 0;
}
| cd79a6e29b1e43af82e06c7f9791d7c72a796015.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include<opencv2\imgproc.hpp>
#include <iostream>
#include<math.h>
__global__ void inclusiveScan(int* input, int * output,int* result,int space,int step, int steps,bool direction)
{
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int tid = x + y*blockDim.x*gridDim.x;
int ret = 0;
if (direction) // odd step: read from output, write back into input
{
if (tid < space)
{
ret = output[tid];
input[tid] = ret;
}
else
{
ret = output[tid]+output[tid - space];
input[tid] = ret;
}
}
else //even step
{
if (tid < space) //copied as they were
{
ret = input[tid];
output[tid] = ret;
}
else // add the element 'space' (= 2^step) positions to the left
{
ret = input[tid] + input[tid - space];
output[tid] = ret;
}
}
if (step == steps - 1)
result[tid] = ret;
}
int main()
{
int N = 1024*1024;
int* d_input;
int* d_output;
int* d_result;
int* h_result = new int[N];
int* h_input = new int[N];
for (int i = 0; i < N; i++)
{
h_input[i] = 1;
}
cudaMalloc(&d_input, sizeof(int)*N);
cudaMalloc(&d_output, sizeof(int)*N);
cudaMalloc(&d_result, sizeof(int)*N);
dim3 threads(1024);
dim3 blocks(N/1024);
cudaMemcpy(d_input,h_input,sizeof(int)*N,cudaMemcpyHostToDevice);
int steps = static_cast<int>(log2(static_cast<float>(N)));
int space = 1;
for (int step = 0; step < steps; step++)
{
bool direction = (step % 2 != 0);
inclusiveScan << <blocks,threads >> >(d_input,d_output,d_result,space,step,steps,direction);
space = space * 2;
}
// Copy the scan result back to the host
cudaMemcpy(h_result, d_result, sizeof(int)*N,cudaMemcpyDeviceToHost);
for (int i = 0; i+1 < N; i++)
{
h_input[i+1] = h_input[i] + h_input[i + 1];
}
int correct = 0;
int incorrect = 0;
for (int i = 0; i < N; i++)
std::cout << h_result[i] << "\t" << h_input[i] << std::endl;
//(h_input[i]==h_result[i]) ?correct++ :incorrect++ ;
//std::cout << "Correct: " << correct << "\tincorrect: " << incorrect<<std::endl;
delete[]h_result;
delete[]h_input;
cudaFree(d_input);
cudaFree(d_output);
cudaFree(d_result);
return 0;
}
|
c3777a3b1db52f79061e36ecab218b4bee716e2d.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHBlas.h"
#include "THHGeneral.h"
void THCudaBlas_init(THCState *state, int devices, int device)
{
THCBlasState *blas_state = state->blasState;
blas_state->handles = (hipblasHandle_t *)malloc(devices * sizeof(hipblasHandle_t));
for (int i = 0; i < devices; i++) {
// Create handle on each device:
hipSetDevice(i);
hipblasCreate(&blas_state->handles[i]);
}
// Set current handle:
blas_state->current_handle = &blas_state->handles[device];
blas_state->n_devices = devices;
// Restore device:
hipSetDevice(device);
}
void THCudaBlas_shutdown(THCState *state)
{
THCBlasState *blas_state = state->blasState;
for (int i = 0; i < blas_state->n_devices; i++) {
hipblasDestroy(blas_state->handles[i]);
}
free(blas_state->handles);
}
void THCudaBlas_reset(THCState *state, int device)
{
THCBlasState* blasState = state->blasState;
if (&blasState->handles[device] != blasState->current_handle) {
THError("Unexpected cuBLAS state");
}
hipblasCreate(&blasState->handles[device]);
blasState->current_handle = &blasState->handles[device];
}
void THCudaBlas_setHandle(THCState *state, int device)
{
THCBlasState *blas_state = state->blasState;
blas_state->current_handle = &blas_state->handles[device];
}
void THCudaBlas_setStream(THCState *state, int device, hipStream_t stream)
{
THCublasCheck(hipblasSetStream(state->blasState->handles[device], stream));
}
void THCudaBlas_swap(THCState *state, long n, float *x, long incx, float *y, long incy)
{
if(n == 1)
{
incx = 1;
incy = 1;
}
if( (n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_n = (int)n;
int i_incx = (int)incx;
int i_incy = (int)incy;
THCublasCheck(hipblasSswap(*state->blasState->current_handle, i_n, x, i_incx, y, i_incy));
return;
}
THError("Cublas_swap only supports n, incx and"
" incy upto signed integer limits: %d", INT_MAX);
}
void THCudaBlas_scal(THCState *state, long n, float a, float *x, long incx)
{
if(n == 1)
incx = 1;
if( (n <= INT_MAX) && (incx <= INT_MAX) )
{
int i_n = (int)n;
int i_incx = (int)incx;
THCublasCheck(hipblasSscal(*state->blasState->current_handle, i_n, &a, x, i_incx));
return;
}
THError("Cublas_scal only supports n and incx "
"upto signed integer limits: %d", INT_MAX);
}
void THCudaBlas_copy(THCState *state, long n, float *x, long incx, float *y, long incy)
{
if(n == 1)
{
incx = 1;
incy = 1;
}
if( (n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_n = (int)n;
int i_incx = (int)incx;
int i_incy = (int)incy;
THCublasCheck(hipblasScopy(*state->blasState->current_handle, i_n, x, i_incx, y, i_incy));
return;
}
THError("Cublas_copy only supports n, incx and incy "
"upto signed integer limits: %d", INT_MAX);
}
void THCudaBlas_axpy(THCState *state, long n, float a, float *x, long incx, float *y, long incy)
{
if(n == 1)
{
incx = 1;
incy = 1;
}
if( (n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_n = (int)n;
int i_incx = (int)incx;
int i_incy = (int)incy;
THCublasCheck(hipblasSaxpy(*state->blasState->current_handle, i_n, &a, x, i_incx, y, i_incy));
return;
}
THError("Cublas_axpy only supports n, incx and incy "
"upto signed integer limits: %d", INT_MAX);
}
float THCudaBlas_dot(THCState *state, long n, float *x, long incx, float *y, long incy)
{
if(n == 1)
{
incx = 1;
incy = 1;
}
if( (n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_n = (int)n;
int i_incx = (int)incx;
int i_incy = (int)incy;
float result;
THCublasCheck(hipblasSdot(*state->blasState->current_handle, i_n, x, i_incx, y, i_incy, &result));
hipDeviceSynchronize();
return result;
}
THError("Cublas_dot only supports n, incx and incy "
"upto signed integer limits: %d", INT_MAX);
return -1;
}
/* Level 2 */
void THCudaBlas_gemv(THCState *state, char trans, long m, long n, float alpha, float *a, long lda, float *x, long incx, float beta, float *y, long incy)
{
if(n == 1)
lda = m;
hipblasOperation_t op;
if (trans == 't') op = HIPBLAS_OP_T;
else if (trans == 'n') op = HIPBLAS_OP_N;
else if (trans == 'c') op = HIPBLAS_OP_C;
if( (m <= INT_MAX) && (n <= INT_MAX) &&
(lda > 0) && (lda <= INT_MAX) &&
(incx > 0) && (incx <= INT_MAX) &&
(incy > 0) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
THCublasCheck(hipblasSgemv(*state->blasState->current_handle, op, i_m, i_n, &alpha, a, i_lda, x, i_incx, &beta, y, i_incy));
return;
}
THError("Cublas_gemv only supports m, n, lda, incx, incy"
"in the range 0 < [val] <= %d", INT_MAX);
}
void THCudaBlas_ger(THCState *state, long m, long n, float alpha, float *x, long incx, float *y, long incy, float *a, long lda)
{
if(n == 1)
lda = m;
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
THCublasCheck(hipblasSger(*state->blasState->current_handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_ger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
hipblasOperation_t convertTransToCublasOperation(char trans) {
if (trans == 't') return HIPBLAS_OP_T;
else if (trans == 'n') return HIPBLAS_OP_N;
else if (trans == 'c') return HIPBLAS_OP_C;
else {
THError("trans must be one of: t, n, c");
return HIPBLAS_OP_T;
}
}
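// adjustLd patches the leading dimensions for degenerate shapes (m, n or k equal to 1),
// where the caller-supplied ld may be meaningless, so that the ld arguments satisfy
// cuBLAS's requirement of being at least the number of rows of each stored
// (column-major) matrix.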
void adjustLd(char transa, char transb, long m, long n, long k, long *lda, long *ldb, long *ldc)
{
int transa_ = ((transa == 't') || (transa == 'T'));
int transb_ = ((transb == 't') || (transb == 'T'));
if(n == 1)
*ldc = m;
if(transa_)
{
if(m == 1)
*lda = k;
}
else
{
if(k == 1)
*lda = m;
}
if(transb_)
{
if(k == 1)
*ldb = n;
}
else
{
if(n == 1)
*ldb = k;
}
}
/* Level 3 */
void THCudaBlas_gemm(THCState *state, char transa, char transb, long m, long n, long k, float alpha, float *a, long lda, float *b, long ldb, float beta, float *c, long ldc)
{
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_lda = (int)lda;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
THCublasCheck(hipblasSgemm(*state->blasState->current_handle, opa, opb, i_m, i_n, i_k, &alpha, a, i_lda, b, i_ldb, &beta, c, i_ldc));
return;
}
THError("Cublas_gemm only supports m, n, k, lda, ldb, ldc"
"with the bound [val] <= %d", INT_MAX);
}
void THCudaBlas_gemmBatched(THCState *state, char transa, char transb, long m, long n, long k,
float alpha, const float *a[], long lda, const float *b[], long ldb,
float beta, float *c[], long ldc, long batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_gemm only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
THCublasCheck(hipblasSgemmBatched(*state->blasState->current_handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
}
| c3777a3b1db52f79061e36ecab218b4bee716e2d.cu | #include "THCBlas.h"
#include "THCGeneral.h"
void THCudaBlas_init(THCState *state, int devices, int device)
{
THCBlasState *blas_state = state->blasState;
blas_state->handles = (cublasHandle_t *)malloc(devices * sizeof(cublasHandle_t));
for (int i = 0; i < devices; i++) {
// Create handle on each device:
cudaSetDevice(i);
cublasCreate(&blas_state->handles[i]);
}
// Set current handle:
blas_state->current_handle = &blas_state->handles[device];
blas_state->n_devices = devices;
// Restore device:
cudaSetDevice(device);
}
void THCudaBlas_shutdown(THCState *state)
{
THCBlasState *blas_state = state->blasState;
for (int i = 0; i < blas_state->n_devices; i++) {
cublasDestroy(blas_state->handles[i]);
}
free(blas_state->handles);
}
void THCudaBlas_reset(THCState *state, int device)
{
THCBlasState* blasState = state->blasState;
if (&blasState->handles[device] != blasState->current_handle) {
THError("Unexpected cuBLAS state");
}
cublasCreate(&blasState->handles[device]);
blasState->current_handle = &blasState->handles[device];
}
void THCudaBlas_setHandle(THCState *state, int device)
{
THCBlasState *blas_state = state->blasState;
blas_state->current_handle = &blas_state->handles[device];
}
void THCudaBlas_setStream(THCState *state, int device, cudaStream_t stream)
{
THCublasCheck(cublasSetStream(state->blasState->handles[device], stream));
}
void THCudaBlas_swap(THCState *state, long n, float *x, long incx, float *y, long incy)
{
if(n == 1)
{
incx = 1;
incy = 1;
}
if( (n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_n = (int)n;
int i_incx = (int)incx;
int i_incy = (int)incy;
THCublasCheck(cublasSswap(*state->blasState->current_handle, i_n, x, i_incx, y, i_incy));
return;
}
THError("Cublas_swap only supports n, incx and"
" incy upto signed integer limits: %d", INT_MAX);
}
void THCudaBlas_scal(THCState *state, long n, float a, float *x, long incx)
{
if(n == 1)
incx = 1;
if( (n <= INT_MAX) && (incx <= INT_MAX) )
{
int i_n = (int)n;
int i_incx = (int)incx;
THCublasCheck(cublasSscal(*state->blasState->current_handle, i_n, &a, x, i_incx));
return;
}
THError("Cublas_scal only supports n and incx "
"upto signed integer limits: %d", INT_MAX);
}
void THCudaBlas_copy(THCState *state, long n, float *x, long incx, float *y, long incy)
{
if(n == 1)
{
incx = 1;
incy = 1;
}
if( (n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_n = (int)n;
int i_incx = (int)incx;
int i_incy = (int)incy;
THCublasCheck(cublasScopy(*state->blasState->current_handle, i_n, x, i_incx, y, i_incy));
return;
}
THError("Cublas_copy only supports n, incx and incy "
"upto signed integer limits: %d", INT_MAX);
}
void THCudaBlas_axpy(THCState *state, long n, float a, float *x, long incx, float *y, long incy)
{
if(n == 1)
{
incx = 1;
incy = 1;
}
if( (n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_n = (int)n;
int i_incx = (int)incx;
int i_incy = (int)incy;
THCublasCheck(cublasSaxpy(*state->blasState->current_handle, i_n, &a, x, i_incx, y, i_incy));
return;
}
THError("Cublas_axpy only supports n, incx and incy "
"upto signed integer limits: %d", INT_MAX);
}
float THCudaBlas_dot(THCState *state, long n, float *x, long incx, float *y, long incy)
{
if(n == 1)
{
incx = 1;
incy = 1;
}
if( (n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_n = (int)n;
int i_incx = (int)incx;
int i_incy = (int)incy;
float result;
THCublasCheck(cublasSdot(*state->blasState->current_handle, i_n, x, i_incx, y, i_incy, &result));
cudaDeviceSynchronize();
return result;
}
THError("Cublas_dot only supports n, incx and incy "
"upto signed integer limits: %d", INT_MAX);
return -1;
}
/* Level 2 */
void THCudaBlas_gemv(THCState *state, char trans, long m, long n, float alpha, float *a, long lda, float *x, long incx, float beta, float *y, long incy)
{
if(n == 1)
lda = m;
cublasOperation_t op;
if (trans == 't') op = CUBLAS_OP_T;
else if (trans == 'n') op = CUBLAS_OP_N;
else if (trans == 'c') op = CUBLAS_OP_C;
if( (m <= INT_MAX) && (n <= INT_MAX) &&
(lda > 0) && (lda <= INT_MAX) &&
(incx > 0) && (incx <= INT_MAX) &&
(incy > 0) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
THCublasCheck(cublasSgemv(*state->blasState->current_handle, op, i_m, i_n, &alpha, a, i_lda, x, i_incx, &beta, y, i_incy));
return;
}
THError("Cublas_gemv only supports m, n, lda, incx, incy"
"in the range 0 < [val] <= %d", INT_MAX);
}
void THCudaBlas_ger(THCState *state, long m, long n, float alpha, float *x, long incx, float *y, long incy, float *a, long lda)
{
if(n == 1)
lda = m;
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
THCublasCheck(cublasSger(*state->blasState->current_handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_ger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
cublasOperation_t convertTransToCublasOperation(char trans) {
if (trans == 't') return CUBLAS_OP_T;
else if (trans == 'n') return CUBLAS_OP_N;
else if (trans == 'c') return CUBLAS_OP_C;
else {
THError("trans must be one of: t, n, c");
return CUBLAS_OP_T;
}
}
void adjustLd(char transa, char transb, long m, long n, long k, long *lda, long *ldb, long *ldc)
{
int transa_ = ((transa == 't') || (transa == 'T'));
int transb_ = ((transb == 't') || (transb == 'T'));
if(n == 1)
*ldc = m;
if(transa_)
{
if(m == 1)
*lda = k;
}
else
{
if(k == 1)
*lda = m;
}
if(transb_)
{
if(k == 1)
*ldb = n;
}
else
{
if(n == 1)
*ldb = k;
}
}
/* Level 3 */
void THCudaBlas_gemm(THCState *state, char transa, char transb, long m, long n, long k, float alpha, float *a, long lda, float *b, long ldb, float beta, float *c, long ldc)
{
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_k = (int)k;
int i_lda = (int)lda;
int i_ldb = (int)ldb;
int i_ldc = (int)ldc;
THCublasCheck(cublasSgemm(*state->blasState->current_handle, opa, opb, i_m, i_n, i_k, &alpha, a, i_lda, b, i_ldb, &beta, c, i_ldc));
return;
}
THError("Cublas_gemm only supports m, n, k, lda, ldb, ldc"
"with the bound [val] <= %d", INT_MAX);
}
void THCudaBlas_gemmBatched(THCState *state, char transa, char transb, long m, long n, long k,
float alpha, const float *a[], long lda, const float *b[], long ldb,
float beta, float *c[], long ldc, long batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_gemm only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLd(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
THCublasCheck(cublasSgemmBatched(*state->blasState->current_handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
}
|
774fa15e724f1eaefb794e7d4dcd491bff602b0b.hip | // !!! This is a file automatically generated by hipify!!!
#include "data_structure/copyable_to_vec.h"
#include "intersect/accel/detail/clipped_triangle_impl.h"
#include "intersect/accel/sbvh/detail/generator.h"
#include "intersect/triangle_impl.h"
#include "lib/assert.h"
#include "lib/eigen_utils.h"
#include "lib/info/timer.h"
#include <boost/function_output_iterator.hpp>
#include <unordered_set>
namespace intersect {
namespace accel {
namespace sbvh {
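// SBVH ("split BVH", Stich et al. 2009) construction: each node first evaluates the
// best object split (a sweep-SAH partition along each axis); if spatial splits are
// enabled and the overlap of the two child boxes is large relative to the root
// surface area, binned spatial splits (which may duplicate triangle references) are
// also considered, and the cheaper candidate is used.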
template <ExecutionModel exec>
RefPerm<BVH<>>
SBVH<exec>::Generator::gen(const Settings &settings,
SpanSized<const Triangle> triangles_in) {
Timer start;
triangles_in_ = triangles_in;
HostVector<ClippedTriangle> triangles(triangles_in.size());
HostVector<unsigned> idxs(triangles.size());
for (unsigned i = 0; i < triangles.size(); ++i) {
triangles[i] = ClippedTriangle(triangles_in[i]);
idxs[i] = i;
}
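// Roughly: spatial splits can duplicate a triangle into both children. An entry of
// final_idxs_for_dups stays nullopt while a triangle has not yet been assigned a slot
// in the output permutation; once the first copy is placed, later copies reuse that
// recorded index (via the extra_idxs list) instead of claiming a new slot.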
HostVector<std::optional<unsigned>> final_idxs_for_dups(triangles.size(),
std::nullopt);
settings_ = settings;
HostVector<Node> nodes(1);
HostVector<unsigned> extra_idxs;
nodes[0] = create_node(triangles, idxs, final_idxs_for_dups, nodes,
extra_idxs, 0, true);
copy_to_vec(nodes, nodes_);
copy_to_vec(extra_idxs, extra_idxs_);
bvh::check_and_print_stats(nodes, settings.bvh_settings,
BVH<>::objects_vec_size);
if (settings.bvh_settings.print_stats) {
start.report("sbvh gen time");
}
#ifndef NDEBUG
std::unordered_set<unsigned> idxs_check(idxs.begin(), idxs.end());
debug_assert(idxs_check.size() == idxs.size());
if (!idxs.empty()) {
debug_assert(*std::min_element(idxs.begin(), idxs.end()) == 0);
debug_assert(*std::max_element(idxs.begin(), idxs.end()) ==
idxs.size() - 1);
}
#endif
return {
.ref =
{
.nodes = nodes_,
.extra_idxs = extra_idxs_,
.target_objects = settings.bvh_settings.target_objects,
},
.permutation = idxs,
};
}
template <ExecutionModel exec>
Node SBVH<exec>::Generator::create_node(
SpanSized<ClippedTriangle> triangles, SpanSized<unsigned> idxs,
SpanSized<std::optional<unsigned>> final_idxs_for_dups,
HostVector<Node> &nodes, HostVector<unsigned> &extra_idxs,
unsigned start_idx, bool is_root) {
always_assert(triangles.size() == idxs.size());
always_assert(triangles.size() == final_idxs_for_dups.size());
always_assert(!triangles.empty());
#ifndef NDEBUG
{
HostVector<unsigned> idxs_to_check(idxs.begin(), idxs.end());
std::sort(idxs_to_check.begin(), idxs_to_check.end());
debug_assert(
std::adjacent_find(idxs_to_check.begin(), idxs_to_check.end()) ==
idxs_to_check.end());
for (unsigned i = 0; i < triangles.size(); ++i) {
unsigned idx = idxs[i];
for (unsigned vert_idx = 0; vert_idx < 3; ++vert_idx) {
debug_assert((triangles_in_[idx].vertices[vert_idx].array() ==
triangles[i].triangle.vertices[vert_idx].array())
.all());
}
}
}
#endif
AABB overall_aabb = AABB::empty();
for (const auto &triangle : triangles) {
overall_aabb = overall_aabb.union_other(triangle.bounds);
}
if (is_root) {
root_surface_area_ = overall_aabb.surface_area();
}
SplitCandidate overall_best_split = {
.base_cost = std::numeric_limits<float>::max(),
};
for (unsigned axis = 0; axis < 3; ++axis) {
overall_best_split = ::min(
overall_best_split, best_object_split(triangles.as_const(), axis));
}
const float surface_area_intersection =
overall_best_split.item.get(tag_v<SplitType::Object>)
.intersection_surface_area;
if (settings_.use_spatial_splits &&
surface_area_intersection / root_surface_area_ >
settings_.overlap_threshold) {
for (unsigned axis = 0; axis < 3; ++axis) {
overall_best_split = ::min(
overall_best_split, best_spatial_split(triangles.as_const(), axis));
}
}
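// Surface-area heuristic: the best split found above carries
// base_cost = N_L * SA(left) + N_R * SA(right); dividing by the parent's surface area
// and adding the traversal cost gives the expected cost of splitting, which is then
// compared against the cost of a leaf holding all N triangles (per-intersection cost
// of 1).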
float surface_area = overall_aabb.surface_area();
float best_cost = overall_best_split.base_cost / surface_area +
settings_.bvh_settings.traversal_per_intersect_cost;
if (best_cost >= triangles.size()) {
HostVector<unsigned> actual_final_idxs(final_idxs_for_dups.size());
unsigned running_idx = start_idx;
for (unsigned i = 0; i < final_idxs_for_dups.size(); ++i) {
if (final_idxs_for_dups[i].has_value()) {
actual_final_idxs[i] = *final_idxs_for_dups[i];
} else {
actual_final_idxs[i] = running_idx;
++running_idx;
}
}
bool use_start_end = true;
for (unsigned i = 0; i < actual_final_idxs.size() - 1; ++i) {
use_start_end = use_start_end &&
actual_final_idxs[i + 1] > actual_final_idxs[i] &&
actual_final_idxs[i + 1] - actual_final_idxs[i] == 1;
}
auto item = [&]() -> Items {
if (!use_start_end) {
for (unsigned actual_idx : actual_final_idxs) {
extra_idxs.push_back(actual_idx);
}
return {
.start_end =
{
.start =
unsigned(extra_idxs.size() - actual_final_idxs.size()),
.end = unsigned(extra_idxs.size()),
},
.is_for_extra = true,
};
} else {
debug_assert(actual_final_idxs[0] + unsigned(triangles.size()) ==
actual_final_idxs[actual_final_idxs.size() - 1] + 1);
return {
.start_end =
{
.start = actual_final_idxs[0],
.end = actual_final_idxs[0] + unsigned(triangles.size()),
},
.is_for_extra = false,
};
}
}();
return {
.value = NodeValue(NodeValueRep{tag_v<NodeType::Items>, item}),
.aabb = overall_aabb,
};
}
nodes.resize(nodes.size() + 2);
unsigned left_idx = nodes.size() - 2;
unsigned right_idx = nodes.size() - 1;
HostVector<ClippedTriangle> old_triangles(triangles.begin(), triangles.end());
HostVector<unsigned> old_idxs(idxs.begin(), idxs.end());
HostVector<std::optional<unsigned>> old_final_idxs_for_dups(
final_idxs_for_dups.begin(), final_idxs_for_dups.end());
overall_best_split.item.visit_tagged([&](auto tag, const auto &split) {
if constexpr (tag == SplitType::Object) {
for (unsigned i = 0; i < triangles.size(); ++i) {
unsigned perm_idx = split.perm[i];
triangles[i] = old_triangles[perm_idx];
idxs[i] = old_idxs[perm_idx];
final_idxs_for_dups[i] = old_final_idxs_for_dups[perm_idx];
}
unsigned split_point = split.split_point;
unsigned num_in_order_before_split = 0;
for (unsigned i = 0; i < split_point; ++i) {
if (!final_idxs_for_dups[i].has_value()) {
++num_in_order_before_split;
}
}
nodes[left_idx] = create_node(triangles.slice_to(split_point),
idxs.slice_to(split_point),
final_idxs_for_dups.slice_to(split_point),
nodes, extra_idxs, start_idx, false);
nodes[right_idx] = create_node(
triangles.slice_from(split_point), idxs.slice_from(split_point),
final_idxs_for_dups.slice_from(split_point), nodes, extra_idxs,
start_idx + num_in_order_before_split, false);
} else {
always_assert(split.left_triangles.size() == split.left_bounds.size());
always_assert(split.right_triangles.size() == split.right_bounds.size());
for (unsigned i = 0; i < split.left_triangles.size(); ++i) {
unsigned perm_idx = split.left_triangles[i];
triangles[i] = old_triangles[perm_idx];
triangles[i].bounds = split.left_bounds[i];
idxs[i] = old_idxs[perm_idx];
final_idxs_for_dups[i] = old_final_idxs_for_dups[perm_idx];
}
unsigned split_point = split.left_triangles.size();
nodes[left_idx] = create_node(triangles.slice_to(split_point),
idxs.slice_to(split_point),
final_idxs_for_dups.slice_to(split_point),
nodes, extra_idxs, start_idx, false);
std::unordered_set<unsigned> dups;
std::set_intersection(
split.left_triangles.begin(), split.left_triangles.end(),
split.right_triangles.begin(), split.right_triangles.end(),
std::inserter(dups, dups.begin()));
std::unordered_map<unsigned, unsigned> dup_idx_to_right_idx;
for (unsigned i = 0; i < split.right_triangles.size(); ++i) {
unsigned actual_idx = split.right_triangles[i];
if (dups.contains(actual_idx)) {
dup_idx_to_right_idx.insert({old_idxs[actual_idx], i});
}
}
HostVector<ClippedTriangle> triangles_for_right(
split.right_triangles.size());
// idxs_for_right is just the identity permutation 0..N-1 ("enumerate")
HostVector<unsigned> idxs_for_right(split.right_triangles.size());
HostVector<std::optional<unsigned>> final_idxs_for_dups_for_right(
split.right_triangles.size());
for (unsigned i = 0; i < split.right_triangles.size(); ++i) {
unsigned perm_idx = split.right_triangles[i];
triangles_for_right[i] = old_triangles[perm_idx];
triangles_for_right[i].bounds = split.right_bounds[i];
idxs_for_right[i] = i;
final_idxs_for_dups_for_right[i] = old_final_idxs_for_dups[perm_idx];
}
unsigned num_in_order_before_split = 0;
for (unsigned i = 0; i < split.left_triangles.size(); ++i) {
// this is reordered by the previous call to create_node
// same with final_idxs_for_dups (below)
unsigned idx = idxs[i];
auto it = dup_idx_to_right_idx.find(idx);
if (it != dup_idx_to_right_idx.end()) {
unsigned right_idx = it->second;
debug_assert(final_idxs_for_dups_for_right[right_idx] ==
final_idxs_for_dups[i]);
if (!final_idxs_for_dups_for_right[right_idx].has_value()) {
final_idxs_for_dups_for_right[right_idx] =
start_idx + num_in_order_before_split;
}
}
if (!final_idxs_for_dups[i].has_value()) {
++num_in_order_before_split;
}
}
#ifndef NDEBUG
HostVector<Triangle> triangles_for_debug_in(triangles_for_right.size());
std::transform(triangles_for_right.begin(), triangles_for_right.end(),
triangles_for_debug_in.begin(),
[](const ClippedTriangle &tri) { return tri.triangle; });
auto old_in = triangles_in_;
triangles_in_ = triangles_for_debug_in;
#endif
nodes[right_idx] = create_node(
triangles_for_right, idxs_for_right, final_idxs_for_dups_for_right,
nodes, extra_idxs, start_idx + num_in_order_before_split, false);
#ifndef NDEBUG
triangles_in_ = old_in;
#endif
// we need to reorder idxs and final_idxs_for_dups because those
// will be used by the caller (level above in recursion)
unsigned overall_idx = split.left_triangles.size();
for (unsigned i = 0; i < split.right_triangles.size(); ++i) {
unsigned prev_step_idx = idxs_for_right[i];
if (dups.contains(split.right_triangles[prev_step_idx])) {
continue;
}
idxs[overall_idx] = old_idxs[split.right_triangles[prev_step_idx]];
final_idxs_for_dups[overall_idx] =
old_final_idxs_for_dups[split.right_triangles[prev_step_idx]];
++overall_idx;
}
debug_assert(overall_idx == triangles.size());
}
});
return {
.value = NodeValue(NodeValueRep{
tag_v<NodeType::Split>,
{
.left_idx = left_idx,
.right_idx = right_idx,
},
}),
.aabb = overall_aabb,
};
}
template <ExecutionModel exec>
SplitCandidate SBVH<exec>::Generator::best_object_split(
SpanSized<const ClippedTriangle> triangles_in, unsigned axis) {
HostVector<ClippedTriangle> triangles(triangles_in.begin(),
triangles_in.end());
always_assert(!triangles.empty());
auto sort_perm = sort_by_axis(triangles, axis);
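// Sweep SAH: with the triangles sorted by centroid along this axis, suffix bounding
// boxes are accumulated right-to-left below, then a prefix box is grown left-to-right
// so every split position i can be scored as i * SA(prefix) + (N - i) * SA(suffix) in
// a single pass.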
// inclusive
HostVector<AABB> aabbs_backward(triangles.size());
AABB running_aabb_backward = AABB::empty();
for (unsigned i = triangles.size() - 1;
i != std::numeric_limits<unsigned>::max(); --i) {
running_aabb_backward =
running_aabb_backward.union_other(triangles[i].bounds);
aabbs_backward[i] = running_aabb_backward;
}
float best_base_cost = std::numeric_limits<float>::max();
float best_intersection_surface_area = 0.f;
unsigned best_split = 0;
// exclusive
AABB running_aabb = AABB::empty();
for (unsigned i = 0; i < triangles.size(); ++i) {
const auto &left_aabb = running_aabb;
const auto &right_aabb = aabbs_backward[i];
float intersection_surface_area =
left_aabb.intersection_other(right_aabb).surface_area();
float surface_area_left = left_aabb.surface_area();
float surface_area_right = right_aabb.surface_area();
float base_cost =
i * surface_area_left + (triangles.size() - i) * surface_area_right;
if (base_cost < best_base_cost) {
best_base_cost = base_cost;
best_intersection_surface_area = intersection_surface_area;
best_split = i;
}
running_aabb = running_aabb.union_other(triangles[i].bounds);
}
return {
.base_cost = best_base_cost,
.item =
{
tag_v<SplitType::Object>,
{
.perm = sort_perm,
.split_point = best_split,
.intersection_surface_area = best_intersection_surface_area,
},
},
};
}
template <ExecutionModel exec>
HostVector<unsigned>
SBVH<exec>::Generator::sort_by_axis(SpanSized<ClippedTriangle> triangles,
unsigned axis) {
struct ValueIdx {
float value;
unsigned idx;
};
HostVector<ValueIdx> to_sort(triangles.size());
for (unsigned i = 0; i < triangles.size(); ++i) {
to_sort[i] = {
// TODO: somehow use the triangle?
.value = triangles[i].bounds.centroid()[axis],
.idx = i,
};
}
std::sort(to_sort.begin(), to_sort.end(),
[](ValueIdx l, ValueIdx r) { return l.value < r.value; });
HostVector<unsigned> out(triangles.size());
HostVector<ClippedTriangle> old_triangles(triangles.begin(), triangles.end());
for (unsigned i = 0; i < triangles.size(); ++i) {
unsigned idx = to_sort[i].idx;
out[i] = idx;
triangles[i] = old_triangles[idx];
}
return out;
}
template <ExecutionModel exec>
SplitCandidate SBVH<exec>::Generator::best_spatial_split(
SpanSized<const ClippedTriangle> triangles, unsigned axis) {
always_assert(!triangles.empty());
AABB overall_aabb = AABB::empty();
for (const ClippedTriangle &triangle : triangles) {
overall_aabb = overall_aabb.union_other(triangle.bounds);
}
// TODO: make param?
unsigned num_divisions = triangles.size() * 2;
// doesn't include the furthest left and right edges
unsigned num_inside_edges = num_divisions - 1;
float total = overall_aabb.max_bound[axis] - overall_aabb.min_bound[axis];
struct BinInfo {
AABB aabb = AABB::empty();
unsigned entries = 0;
unsigned exits = 0;
};
HostVector<BinInfo> bin_infos(num_divisions);
HostVector<HostVector<unsigned>> triangles_crossing_edge(num_inside_edges);
auto get_split_point = [&](unsigned loc) {
float frac = float(loc) / num_divisions;
if (loc == num_divisions) {
// reduce float error (actually needed...)
return overall_aabb.max_bound[axis];
}
return frac * total + overall_aabb.min_bound[axis];
};
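// Binned spatial split: the parent box is cut into num_divisions uniform slabs along
// this axis; each triangle is clipped to every slab it spans, growing that slab's
// AABB, and entry/exit counters record in which slab a triangle's extent starts and
// ends, so per-side triangle counts can later be recovered by scanning the bins.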
for (unsigned triangle_idx = 0; triangle_idx < triangles.size();
++triangle_idx) {
const ClippedTriangle &triangle = triangles[triangle_idx];
auto bounds = triangle.bounds;
float min_axis = bounds.min_bound[axis];
float max_axis = bounds.max_bound[axis];
debug_assert((bounds.min_bound.array() <= bounds.max_bound.array()).all());
float min_prop = (min_axis - overall_aabb.min_bound[axis]) / total;
float max_prop = (max_axis - overall_aabb.min_bound[axis]) / total;
// min on both to handle case where triangle is on max edge
unsigned min_loc = ::min(unsigned(::floor(min_prop * num_divisions)),
num_divisions - 1);
unsigned max_loc = ::min(unsigned(::floor(max_prop * num_divisions)),
num_divisions - 1);
// Special-case some floating-point issues: widen min_loc/max_loc when the computed
// bin boundaries do not actually cover the clipped bounds; a cleaner solution may be
// possible later.
if (get_split_point(min_loc) > bounds.min_bound[axis]) {
debug_assert_assume(min_loc > 0);
--min_loc;
}
if (get_split_point(max_loc + 1) < bounds.max_bound[axis]) {
debug_assert_assume(max_loc < num_divisions - 1);
++max_loc;
}
if (min_loc != max_loc) {
if (get_split_point(max_loc) >= max_axis) {
--max_loc;
}
if (get_split_point(min_loc + 1) <= min_axis) {
++min_loc;
}
}
++bin_infos[min_loc].entries;
++bin_infos[max_loc].exits;
for (unsigned loc = min_loc; loc <= max_loc; ++loc) {
if (loc != max_loc) {
unsigned edge_after = loc;
triangles_crossing_edge[edge_after].push_back(triangle_idx);
}
AABB &loc_aabb = bin_infos[loc].aabb;
float split_left = get_split_point(loc);
float split_right = get_split_point(loc + 1);
loc_aabb = loc_aabb.union_other(
triangle.new_bounds(split_left, split_right, axis));
}
}
#ifndef NDEBUG
{
unsigned total_entries = 0;
unsigned total_exits = 0;
for (const BinInfo &bin_info : bin_infos) {
total_entries += bin_info.entries;
total_exits += bin_info.exits;
}
debug_assert(total_entries == triangles.size());
debug_assert(total_exits == triangles.size());
}
#endif
// inclusive
HostVector<AABB> aabbs_backward(bin_infos.size());
AABB running_aabb_backward = AABB::empty();
for (unsigned i = bin_infos.size() - 1;
i != std::numeric_limits<unsigned>::max(); --i) {
running_aabb_backward =
running_aabb_backward.union_other(bin_infos[i].aabb);
aabbs_backward[i] = running_aabb_backward;
}
float best_base_cost = std::numeric_limits<float>::max();
unsigned best_edge = 0;
std::unordered_set<unsigned> best_left_only_tris;
std::unordered_set<unsigned> best_right_only_tris;
unsigned total_left_overall = 0;
unsigned total_right_overall = triangles.size();
AABB running_aabb = AABB::empty();
for (unsigned edge = 0; edge < num_divisions - 1; ++edge) {
unsigned loc_left = edge;
unsigned loc_right = edge + 1;
total_left_overall += bin_infos[loc_left].entries;
total_right_overall -= bin_infos[loc_left].exits;
unsigned total_left = total_left_overall;
unsigned total_right = total_right_overall;
running_aabb = running_aabb.union_other(bin_infos[loc_left].aabb);
AABB left_aabb = running_aabb;
AABB right_aabb = aabbs_backward[loc_right];
float surface_area_left = left_aabb.surface_area();
float surface_area_right = right_aabb.surface_area();
float base_cost =
total_left * surface_area_left + total_right * surface_area_right;
std::unordered_set<unsigned> left_only_tris;
std::unordered_set<unsigned> right_only_tris;
SpanSized<const unsigned> triangle_idxs = triangles_crossing_edge[edge];
for (unsigned triangle_idx : triangle_idxs) {
const ClippedTriangle &triangle = triangles[triangle_idx];
const AABB left_only = left_aabb.union_other(triangle.bounds);
const AABB right_only = right_aabb.union_other(triangle.bounds);
float base_cost_left_only = total_left * left_only.surface_area() +
(total_right - 1) * surface_area_right;
float base_cost_right_only = (total_left - 1) * surface_area_left +
total_right * right_only.surface_area();
if (::min(base_cost_left_only, base_cost_right_only) < base_cost) {
if (base_cost_left_only < base_cost_right_only) {
total_right -= 1;
base_cost = base_cost_left_only;
surface_area_left = left_only.surface_area();
left_aabb = left_only;
left_only_tris.insert(triangle_idx);
} else {
total_left -= 1;
base_cost = base_cost_right_only;
surface_area_right = right_only.surface_area();
right_aabb = right_only;
right_only_tris.insert(triangle_idx);
}
}
}
if (base_cost < best_base_cost) {
best_base_cost = base_cost;
best_edge = edge;
best_left_only_tris = left_only_tris;
best_right_only_tris = right_only_tris;
}
}
debug_assert(best_base_cost != std::numeric_limits<float>::max());
  // we need to rebuild the AABBs because the unsplitting may
// result in overly large AABBs
AABB left_aabb = AABB::empty();
AABB right_aabb = AABB::empty();
HostVector<unsigned> left_triangles;
HostVector<unsigned> right_triangles;
HostVector<AABB> left_bounds;
HostVector<AABB> right_bounds;
for (unsigned triangle_idx = 0; triangle_idx < triangles.size();
++triangle_idx) {
const ClippedTriangle &triangle = triangles[triangle_idx];
if (best_left_only_tris.contains(triangle_idx)) {
left_aabb = left_aabb.union_other(triangle.bounds);
left_bounds.push_back(triangle.bounds);
left_triangles.push_back(triangle_idx);
continue;
}
if (best_right_only_tris.contains(triangle_idx)) {
right_aabb = right_aabb.union_other(triangle.bounds);
right_bounds.push_back(triangle.bounds);
right_triangles.push_back(triangle_idx);
continue;
}
float split_point = get_split_point(best_edge + 1);
bool to_left = triangle.bounds.min_bound[axis] < split_point;
bool to_right = triangle.bounds.max_bound[axis] > split_point;
debug_assert(to_left || to_right ||
(triangle.bounds.min_bound[axis] == split_point &&
triangle.bounds.max_bound[axis] == split_point));
bool on_split = !to_left && !to_right;
if (to_left || on_split) {
auto new_bounds = triangle.new_bounds(
std::numeric_limits<float>::lowest(), split_point, axis);
left_aabb = left_aabb.union_other(new_bounds);
left_bounds.push_back(new_bounds);
left_triangles.push_back(triangle_idx);
}
if (to_right) {
auto new_bounds = triangle.new_bounds(
split_point, std::numeric_limits<float>::max(), axis);
right_aabb = right_aabb.union_other(new_bounds);
right_bounds.push_back(new_bounds);
right_triangles.push_back(triangle_idx);
}
}
debug_assert(std::is_sorted(left_triangles.begin(), left_triangles.end()));
debug_assert(std::is_sorted(right_triangles.begin(), right_triangles.end()));
debug_assert(
std::adjacent_find(left_triangles.begin(), left_triangles.end()) ==
left_triangles.end());
debug_assert(
std::adjacent_find(right_triangles.begin(), right_triangles.end()) ==
right_triangles.end());
float actual_base_cost = left_triangles.size() * left_aabb.surface_area() +
right_triangles.size() * right_aabb.surface_area();
// hacky floating point compare...
debug_assert(actual_base_cost <=
best_base_cost + ::max(1e-8, 1e-8 * best_base_cost));
always_assert(left_triangles.size() == left_bounds.size());
always_assert(right_triangles.size() == right_bounds.size());
return {
.base_cost = actual_base_cost,
.item =
{
tag_v<SplitType::Spatial>,
{
.left_triangles = left_triangles,
.right_triangles = right_triangles,
.left_bounds = left_bounds,
.right_bounds = right_bounds,
.left_aabb = left_aabb,
.right_aabb = right_aabb,
},
},
};
}
template class SBVH<ExecutionModel::CPU>::Generator;
#ifndef CPU_ONLY
template class SBVH<ExecutionModel::GPU>::Generator;
#endif
} // namespace sbvh
} // namespace accel
} // namespace intersect
| 774fa15e724f1eaefb794e7d4dcd491bff602b0b.cu | #include "data_structure/copyable_to_vec.h"
#include "intersect/accel/detail/clipped_triangle_impl.h"
#include "intersect/accel/sbvh/detail/generator.h"
#include "intersect/triangle_impl.h"
#include "lib/assert.h"
#include "lib/eigen_utils.h"
#include "lib/info/timer.h"
#include <boost/function_output_iterator.hpp>
#include <unordered_set>
namespace intersect {
namespace accel {
namespace sbvh {
template <ExecutionModel exec>
RefPerm<BVH<>>
SBVH<exec>::Generator::gen(const Settings &settings,
SpanSized<const Triangle> triangles_in) {
Timer start;
triangles_in_ = triangles_in;
HostVector<ClippedTriangle> triangles(triangles_in.size());
HostVector<unsigned> idxs(triangles.size());
for (unsigned i = 0; i < triangles.size(); ++i) {
triangles[i] = ClippedTriangle(triangles_in[i]);
idxs[i] = i;
}
HostVector<std::optional<unsigned>> final_idxs_for_dups(triangles.size(),
std::nullopt);
settings_ = settings;
HostVector<Node> nodes(1);
HostVector<unsigned> extra_idxs;
nodes[0] = create_node(triangles, idxs, final_idxs_for_dups, nodes,
extra_idxs, 0, true);
copy_to_vec(nodes, nodes_);
copy_to_vec(extra_idxs, extra_idxs_);
bvh::check_and_print_stats(nodes, settings.bvh_settings,
BVH<>::objects_vec_size);
if (settings.bvh_settings.print_stats) {
start.report("sbvh gen time");
}
#ifndef NDEBUG
std::unordered_set<unsigned> idxs_check(idxs.begin(), idxs.end());
debug_assert(idxs_check.size() == idxs.size());
if (!idxs.empty()) {
debug_assert(*std::min_element(idxs.begin(), idxs.end()) == 0);
debug_assert(*std::max_element(idxs.begin(), idxs.end()) ==
idxs.size() - 1);
}
#endif
return {
.ref =
{
.nodes = nodes_,
.extra_idxs = extra_idxs_,
.target_objects = settings.bvh_settings.target_objects,
},
.permutation = idxs,
};
}
template <ExecutionModel exec>
Node SBVH<exec>::Generator::create_node(
SpanSized<ClippedTriangle> triangles, SpanSized<unsigned> idxs,
SpanSized<std::optional<unsigned>> final_idxs_for_dups,
HostVector<Node> &nodes, HostVector<unsigned> &extra_idxs,
unsigned start_idx, bool is_root) {
always_assert(triangles.size() == idxs.size());
always_assert(triangles.size() == final_idxs_for_dups.size());
always_assert(!triangles.empty());
#ifndef NDEBUG
{
HostVector<unsigned> idxs_to_check(idxs.begin(), idxs.end());
std::sort(idxs_to_check.begin(), idxs_to_check.end());
debug_assert(
std::adjacent_find(idxs_to_check.begin(), idxs_to_check.end()) ==
idxs_to_check.end());
for (unsigned i = 0; i < triangles.size(); ++i) {
unsigned idx = idxs[i];
for (unsigned vert_idx = 0; vert_idx < 3; ++vert_idx) {
debug_assert((triangles_in_[idx].vertices[vert_idx].array() ==
triangles[i].triangle.vertices[vert_idx].array())
.all());
}
}
}
#endif
AABB overall_aabb = AABB::empty();
for (const auto &triangle : triangles) {
overall_aabb = overall_aabb.union_other(triangle.bounds);
}
if (is_root) {
root_surface_area_ = overall_aabb.surface_area();
}
SplitCandidate overall_best_split = {
.base_cost = std::numeric_limits<float>::max(),
};
for (unsigned axis = 0; axis < 3; ++axis) {
overall_best_split = std::min(
overall_best_split, best_object_split(triangles.as_const(), axis));
}
const float surface_area_intersection =
overall_best_split.item.get(tag_v<SplitType::Object>)
.intersection_surface_area;
if (settings_.use_spatial_splits &&
surface_area_intersection / root_surface_area_ >
settings_.overlap_threshold) {
for (unsigned axis = 0; axis < 3; ++axis) {
overall_best_split = std::min(
overall_best_split, best_spatial_split(triangles.as_const(), axis));
}
}
float surface_area = overall_aabb.surface_area();
float best_cost = overall_best_split.base_cost / surface_area +
settings_.bvh_settings.traversal_per_intersect_cost;
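  // If the estimated split cost (normalized by this node's surface area, plus
  // the traversal overhead) is no better than intersecting every triangle in
  // the node directly, emit a leaf instead of recursing.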
if (best_cost >= triangles.size()) {
HostVector<unsigned> actual_final_idxs(final_idxs_for_dups.size());
unsigned running_idx = start_idx;
for (unsigned i = 0; i < final_idxs_for_dups.size(); ++i) {
if (final_idxs_for_dups[i].has_value()) {
actual_final_idxs[i] = *final_idxs_for_dups[i];
} else {
actual_final_idxs[i] = running_idx;
++running_idx;
}
}
bool use_start_end = true;
for (unsigned i = 0; i < actual_final_idxs.size() - 1; ++i) {
use_start_end = use_start_end &&
actual_final_idxs[i + 1] > actual_final_idxs[i] &&
actual_final_idxs[i + 1] - actual_final_idxs[i] == 1;
}
auto item = [&]() -> Items {
if (!use_start_end) {
for (unsigned actual_idx : actual_final_idxs) {
extra_idxs.push_back(actual_idx);
}
return {
.start_end =
{
.start =
unsigned(extra_idxs.size() - actual_final_idxs.size()),
.end = unsigned(extra_idxs.size()),
},
.is_for_extra = true,
};
} else {
debug_assert(actual_final_idxs[0] + unsigned(triangles.size()) ==
actual_final_idxs[actual_final_idxs.size() - 1] + 1);
return {
.start_end =
{
.start = actual_final_idxs[0],
.end = actual_final_idxs[0] + unsigned(triangles.size()),
},
.is_for_extra = false,
};
}
}();
return {
.value = NodeValue(NodeValueRep{tag_v<NodeType::Items>, item}),
.aabb = overall_aabb,
};
}
nodes.resize(nodes.size() + 2);
unsigned left_idx = nodes.size() - 2;
unsigned right_idx = nodes.size() - 1;
HostVector<ClippedTriangle> old_triangles(triangles.begin(), triangles.end());
HostVector<unsigned> old_idxs(idxs.begin(), idxs.end());
HostVector<std::optional<unsigned>> old_final_idxs_for_dups(
final_idxs_for_dups.begin(), final_idxs_for_dups.end());
overall_best_split.item.visit_tagged([&](auto tag, const auto &split) {
if constexpr (tag == SplitType::Object) {
for (unsigned i = 0; i < triangles.size(); ++i) {
unsigned perm_idx = split.perm[i];
triangles[i] = old_triangles[perm_idx];
idxs[i] = old_idxs[perm_idx];
final_idxs_for_dups[i] = old_final_idxs_for_dups[perm_idx];
}
unsigned split_point = split.split_point;
unsigned num_in_order_before_split = 0;
for (unsigned i = 0; i < split_point; ++i) {
if (!final_idxs_for_dups[i].has_value()) {
++num_in_order_before_split;
}
}
nodes[left_idx] = create_node(triangles.slice_to(split_point),
idxs.slice_to(split_point),
final_idxs_for_dups.slice_to(split_point),
nodes, extra_idxs, start_idx, false);
nodes[right_idx] = create_node(
triangles.slice_from(split_point), idxs.slice_from(split_point),
final_idxs_for_dups.slice_from(split_point), nodes, extra_idxs,
start_idx + num_in_order_before_split, false);
} else {
always_assert(split.left_triangles.size() == split.left_bounds.size());
always_assert(split.right_triangles.size() == split.right_bounds.size());
for (unsigned i = 0; i < split.left_triangles.size(); ++i) {
unsigned perm_idx = split.left_triangles[i];
triangles[i] = old_triangles[perm_idx];
triangles[i].bounds = split.left_bounds[i];
idxs[i] = old_idxs[perm_idx];
final_idxs_for_dups[i] = old_final_idxs_for_dups[perm_idx];
}
unsigned split_point = split.left_triangles.size();
nodes[left_idx] = create_node(triangles.slice_to(split_point),
idxs.slice_to(split_point),
final_idxs_for_dups.slice_to(split_point),
nodes, extra_idxs, start_idx, false);
std::unordered_set<unsigned> dups;
std::set_intersection(
split.left_triangles.begin(), split.left_triangles.end(),
split.right_triangles.begin(), split.right_triangles.end(),
std::inserter(dups, dups.begin()));
std::unordered_map<unsigned, unsigned> dup_idx_to_right_idx;
for (unsigned i = 0; i < split.right_triangles.size(); ++i) {
unsigned actual_idx = split.right_triangles[i];
if (dups.contains(actual_idx)) {
dup_idx_to_right_idx.insert({old_idxs[actual_idx], i});
}
}
HostVector<ClippedTriangle> triangles_for_right(
split.right_triangles.size());
// just "enumerate"
HostVector<unsigned> idxs_for_right(split.right_triangles.size());
HostVector<std::optional<unsigned>> final_idxs_for_dups_for_right(
split.right_triangles.size());
for (unsigned i = 0; i < split.right_triangles.size(); ++i) {
unsigned perm_idx = split.right_triangles[i];
triangles_for_right[i] = old_triangles[perm_idx];
triangles_for_right[i].bounds = split.right_bounds[i];
idxs_for_right[i] = i;
final_idxs_for_dups_for_right[i] = old_final_idxs_for_dups[perm_idx];
}
unsigned num_in_order_before_split = 0;
for (unsigned i = 0; i < split.left_triangles.size(); ++i) {
// this is reordered by the previous call to create_node
// same with final_idxs_for_dups (below)
unsigned idx = idxs[i];
auto it = dup_idx_to_right_idx.find(idx);
if (it != dup_idx_to_right_idx.end()) {
unsigned right_idx = it->second;
debug_assert(final_idxs_for_dups_for_right[right_idx] ==
final_idxs_for_dups[i]);
if (!final_idxs_for_dups_for_right[right_idx].has_value()) {
final_idxs_for_dups_for_right[right_idx] =
start_idx + num_in_order_before_split;
}
}
if (!final_idxs_for_dups[i].has_value()) {
++num_in_order_before_split;
}
}
#ifndef NDEBUG
HostVector<Triangle> triangles_for_debug_in(triangles_for_right.size());
std::transform(triangles_for_right.begin(), triangles_for_right.end(),
triangles_for_debug_in.begin(),
[](const ClippedTriangle &tri) { return tri.triangle; });
auto old_in = triangles_in_;
triangles_in_ = triangles_for_debug_in;
#endif
nodes[right_idx] = create_node(
triangles_for_right, idxs_for_right, final_idxs_for_dups_for_right,
nodes, extra_idxs, start_idx + num_in_order_before_split, false);
#ifndef NDEBUG
triangles_in_ = old_in;
#endif
// we need to reorder idxs and final_idxs_for_dups because those
// will be used by the caller (level above in recursion)
unsigned overall_idx = split.left_triangles.size();
for (unsigned i = 0; i < split.right_triangles.size(); ++i) {
unsigned prev_step_idx = idxs_for_right[i];
if (dups.contains(split.right_triangles[prev_step_idx])) {
continue;
}
idxs[overall_idx] = old_idxs[split.right_triangles[prev_step_idx]];
final_idxs_for_dups[overall_idx] =
old_final_idxs_for_dups[split.right_triangles[prev_step_idx]];
++overall_idx;
}
debug_assert(overall_idx == triangles.size());
}
});
return {
.value = NodeValue(NodeValueRep{
tag_v<NodeType::Split>,
{
.left_idx = left_idx,
.right_idx = right_idx,
},
}),
.aabb = overall_aabb,
};
}
template <ExecutionModel exec>
SplitCandidate SBVH<exec>::Generator::best_object_split(
SpanSized<const ClippedTriangle> triangles_in, unsigned axis) {
HostVector<ClippedTriangle> triangles(triangles_in.begin(),
triangles_in.end());
always_assert(!triangles.empty());
auto sort_perm = sort_by_axis(triangles, axis);
// inclusive
HostVector<AABB> aabbs_backward(triangles.size());
AABB running_aabb_backward = AABB::empty();
for (unsigned i = triangles.size() - 1;
i != std::numeric_limits<unsigned>::max(); --i) {
running_aabb_backward =
running_aabb_backward.union_other(triangles[i].bounds);
aabbs_backward[i] = running_aabb_backward;
}
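  // Sweep the object split position from left to right: with i triangles on the
  // left and (size - i) on the right, the SAH-style base cost is
  //   i * SA(left prefix AABB) + (size - i) * SA(right suffix AABB).
  // The surface area of the overlap between the two sides is also recorded so
  // the caller can decide whether a spatial split is worth attempting.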
float best_base_cost = std::numeric_limits<float>::max();
float best_intersection_surface_area = 0.f;
unsigned best_split = 0;
// exclusive
AABB running_aabb = AABB::empty();
for (unsigned i = 0; i < triangles.size(); ++i) {
const auto &left_aabb = running_aabb;
const auto &right_aabb = aabbs_backward[i];
float intersection_surface_area =
left_aabb.intersection_other(right_aabb).surface_area();
float surface_area_left = left_aabb.surface_area();
float surface_area_right = right_aabb.surface_area();
float base_cost =
i * surface_area_left + (triangles.size() - i) * surface_area_right;
if (base_cost < best_base_cost) {
best_base_cost = base_cost;
best_intersection_surface_area = intersection_surface_area;
best_split = i;
}
running_aabb = running_aabb.union_other(triangles[i].bounds);
}
return {
.base_cost = best_base_cost,
.item =
{
tag_v<SplitType::Object>,
{
.perm = sort_perm,
.split_point = best_split,
.intersection_surface_area = best_intersection_surface_area,
},
},
};
}
template <ExecutionModel exec>
HostVector<unsigned>
SBVH<exec>::Generator::sort_by_axis(SpanSized<ClippedTriangle> triangles,
unsigned axis) {
struct ValueIdx {
float value;
unsigned idx;
};
HostVector<ValueIdx> to_sort(triangles.size());
for (unsigned i = 0; i < triangles.size(); ++i) {
to_sort[i] = {
// TODO: somehow use the triangle?
.value = triangles[i].bounds.centroid()[axis],
.idx = i,
};
}
std::sort(to_sort.begin(), to_sort.end(),
[](ValueIdx l, ValueIdx r) { return l.value < r.value; });
HostVector<unsigned> out(triangles.size());
HostVector<ClippedTriangle> old_triangles(triangles.begin(), triangles.end());
for (unsigned i = 0; i < triangles.size(); ++i) {
unsigned idx = to_sort[i].idx;
out[i] = idx;
triangles[i] = old_triangles[idx];
}
return out;
}
template <ExecutionModel exec>
SplitCandidate SBVH<exec>::Generator::best_spatial_split(
SpanSized<const ClippedTriangle> triangles, unsigned axis) {
always_assert(!triangles.empty());
AABB overall_aabb = AABB::empty();
for (const ClippedTriangle &triangle : triangles) {
overall_aabb = overall_aabb.union_other(triangle.bounds);
}
// TODO: make param?
unsigned num_divisions = triangles.size() * 2;
// doesn't include the furthest left and right edges
unsigned num_inside_edges = num_divisions - 1;
float total = overall_aabb.max_bound[axis] - overall_aabb.min_bound[axis];
struct BinInfo {
AABB aabb = AABB::empty();
unsigned entries = 0;
unsigned exits = 0;
};
HostVector<BinInfo> bin_infos(num_divisions);
HostVector<HostVector<unsigned>> triangles_crossing_edge(num_inside_edges);
auto get_split_point = [&](unsigned loc) {
float frac = float(loc) / num_divisions;
if (loc == num_divisions) {
// reduce float error (actually needed...)
return overall_aabb.max_bound[axis];
}
return frac * total + overall_aabb.min_bound[axis];
};
for (unsigned triangle_idx = 0; triangle_idx < triangles.size();
++triangle_idx) {
const ClippedTriangle &triangle = triangles[triangle_idx];
auto bounds = triangle.bounds;
float min_axis = bounds.min_bound[axis];
float max_axis = bounds.max_bound[axis];
debug_assert((bounds.min_bound.array() <= bounds.max_bound.array()).all());
float min_prop = (min_axis - overall_aabb.min_bound[axis]) / total;
float max_prop = (max_axis - overall_aabb.min_bound[axis]) / total;
// min on both to handle case where triangle is on max edge
unsigned min_loc = std::min(unsigned(std::floor(min_prop * num_divisions)),
num_divisions - 1);
unsigned max_loc = std::min(unsigned(std::floor(max_prop * num_divisions)),
num_divisions - 1);
// special case some floating point issues....
// maybe find a better solution later...
if (get_split_point(min_loc) > bounds.min_bound[axis]) {
debug_assert_assume(min_loc > 0);
--min_loc;
}
if (get_split_point(max_loc + 1) < bounds.max_bound[axis]) {
debug_assert_assume(max_loc < num_divisions - 1);
++max_loc;
}
if (min_loc != max_loc) {
if (get_split_point(max_loc) >= max_axis) {
--max_loc;
}
if (get_split_point(min_loc + 1) <= min_axis) {
++min_loc;
}
}
++bin_infos[min_loc].entries;
++bin_infos[max_loc].exits;
for (unsigned loc = min_loc; loc <= max_loc; ++loc) {
if (loc != max_loc) {
unsigned edge_after = loc;
triangles_crossing_edge[edge_after].push_back(triangle_idx);
}
AABB &loc_aabb = bin_infos[loc].aabb;
float split_left = get_split_point(loc);
float split_right = get_split_point(loc + 1);
loc_aabb = loc_aabb.union_other(
triangle.new_bounds(split_left, split_right, axis));
}
}
#ifndef NDEBUG
{
unsigned total_entries = 0;
unsigned total_exits = 0;
for (const BinInfo &bin_info : bin_infos) {
total_entries += bin_info.entries;
total_exits += bin_info.exits;
}
debug_assert(total_entries == triangles.size());
debug_assert(total_exits == triangles.size());
}
#endif
// inclusive
HostVector<AABB> aabbs_backward(bin_infos.size());
AABB running_aabb_backward = AABB::empty();
for (unsigned i = bin_infos.size() - 1;
i != std::numeric_limits<unsigned>::max(); --i) {
running_aabb_backward =
running_aabb_backward.union_other(bin_infos[i].aabb);
aabbs_backward[i] = running_aabb_backward;
}
float best_base_cost = std::numeric_limits<float>::max();
unsigned best_edge = 0;
std::unordered_set<unsigned> best_left_only_tris;
std::unordered_set<unsigned> best_right_only_tris;
unsigned total_left_overall = 0;
unsigned total_right_overall = triangles.size();
AABB running_aabb = AABB::empty();
for (unsigned edge = 0; edge < num_divisions - 1; ++edge) {
unsigned loc_left = edge;
unsigned loc_right = edge + 1;
total_left_overall += bin_infos[loc_left].entries;
total_right_overall -= bin_infos[loc_left].exits;
unsigned total_left = total_left_overall;
unsigned total_right = total_right_overall;
running_aabb = running_aabb.union_other(bin_infos[loc_left].aabb);
AABB left_aabb = running_aabb;
AABB right_aabb = aabbs_backward[loc_right];
float surface_area_left = left_aabb.surface_area();
float surface_area_right = right_aabb.surface_area();
float base_cost =
total_left * surface_area_left + total_right * surface_area_right;
std::unordered_set<unsigned> left_only_tris;
std::unordered_set<unsigned> right_only_tris;
SpanSized<const unsigned> triangle_idxs = triangles_crossing_edge[edge];
for (unsigned triangle_idx : triangle_idxs) {
const ClippedTriangle &triangle = triangles[triangle_idx];
const AABB left_only = left_aabb.union_other(triangle.bounds);
const AABB right_only = right_aabb.union_other(triangle.bounds);
float base_cost_left_only = total_left * left_only.surface_area() +
(total_right - 1) * surface_area_right;
float base_cost_right_only = (total_left - 1) * surface_area_left +
total_right * right_only.surface_area();
if (std::min(base_cost_left_only, base_cost_right_only) < base_cost) {
if (base_cost_left_only < base_cost_right_only) {
total_right -= 1;
base_cost = base_cost_left_only;
surface_area_left = left_only.surface_area();
left_aabb = left_only;
left_only_tris.insert(triangle_idx);
} else {
total_left -= 1;
base_cost = base_cost_right_only;
surface_area_right = right_only.surface_area();
right_aabb = right_only;
right_only_tris.insert(triangle_idx);
}
}
}
if (base_cost < best_base_cost) {
best_base_cost = base_cost;
best_edge = edge;
best_left_only_tris = left_only_tris;
best_right_only_tris = right_only_tris;
}
}
debug_assert(best_base_cost != std::numeric_limits<float>::max());
  // we need to rebuild the AABBs because the unsplitting may
// result in overly large AABBs
AABB left_aabb = AABB::empty();
AABB right_aabb = AABB::empty();
HostVector<unsigned> left_triangles;
HostVector<unsigned> right_triangles;
HostVector<AABB> left_bounds;
HostVector<AABB> right_bounds;
for (unsigned triangle_idx = 0; triangle_idx < triangles.size();
++triangle_idx) {
const ClippedTriangle &triangle = triangles[triangle_idx];
if (best_left_only_tris.contains(triangle_idx)) {
left_aabb = left_aabb.union_other(triangle.bounds);
left_bounds.push_back(triangle.bounds);
left_triangles.push_back(triangle_idx);
continue;
}
if (best_right_only_tris.contains(triangle_idx)) {
right_aabb = right_aabb.union_other(triangle.bounds);
right_bounds.push_back(triangle.bounds);
right_triangles.push_back(triangle_idx);
continue;
}
float split_point = get_split_point(best_edge + 1);
bool to_left = triangle.bounds.min_bound[axis] < split_point;
bool to_right = triangle.bounds.max_bound[axis] > split_point;
debug_assert(to_left || to_right ||
(triangle.bounds.min_bound[axis] == split_point &&
triangle.bounds.max_bound[axis] == split_point));
bool on_split = !to_left && !to_right;
if (to_left || on_split) {
auto new_bounds = triangle.new_bounds(
std::numeric_limits<float>::lowest(), split_point, axis);
left_aabb = left_aabb.union_other(new_bounds);
left_bounds.push_back(new_bounds);
left_triangles.push_back(triangle_idx);
}
if (to_right) {
auto new_bounds = triangle.new_bounds(
split_point, std::numeric_limits<float>::max(), axis);
right_aabb = right_aabb.union_other(new_bounds);
right_bounds.push_back(new_bounds);
right_triangles.push_back(triangle_idx);
}
}
debug_assert(std::is_sorted(left_triangles.begin(), left_triangles.end()));
debug_assert(std::is_sorted(right_triangles.begin(), right_triangles.end()));
debug_assert(
std::adjacent_find(left_triangles.begin(), left_triangles.end()) ==
left_triangles.end());
debug_assert(
std::adjacent_find(right_triangles.begin(), right_triangles.end()) ==
right_triangles.end());
float actual_base_cost = left_triangles.size() * left_aabb.surface_area() +
right_triangles.size() * right_aabb.surface_area();
// hacky floating point compare...
debug_assert(actual_base_cost <=
best_base_cost + std::max(1e-8, 1e-8 * best_base_cost));
always_assert(left_triangles.size() == left_bounds.size());
always_assert(right_triangles.size() == right_bounds.size());
return {
.base_cost = actual_base_cost,
.item =
{
tag_v<SplitType::Spatial>,
{
.left_triangles = left_triangles,
.right_triangles = right_triangles,
.left_bounds = left_bounds,
.right_bounds = right_bounds,
.left_aabb = left_aabb,
.right_aabb = right_aabb,
},
},
};
}
template class SBVH<ExecutionModel::CPU>::Generator;
#ifndef CPU_ONLY
template class SBVH<ExecutionModel::GPU>::Generator;
#endif
} // namespace sbvh
} // namespace accel
} // namespace intersect
|
bd30981dc52452e8ab795adffea079f971cfb798.hip | // !!! This is a file automatically generated by hipify!!!
/*
* EXAMPLE OF MAPPING THREADS TO MULTIDIMENSIONAL DATA (BLUR IMAGE): CHAPTER 3
*
 * WORK IN PROGRESS...
*
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include"lodepng.h"
#define CHECK_ERROR(call) { \
hipError_t err = call; \
if (err != hipSuccess) { \
printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); \
exit(err); \
} \
}
#define BLUR_SIZE 1 // 3x3 patch --> patch side length = 2*BLUR_SIZE + 1 pixels
#define CHANNEL 4
__global__
void blurKernel(unsigned char *Pin, unsigned char *Pout, int width, int height) {
int Col = blockDim.x * blockIdx.x + threadIdx.x;
int Row = blockDim.y * blockIdx.y + threadIdx.y;
	// check that only the threads with both Row and Col values within range do the work
if ( Col < width && Row < height) {
int pixVal = 0;
int pixels = 0;
		// Get the average of the surrounding (2*BLUR_SIZE+1) x (2*BLUR_SIZE+1) box
		// for each pixel of the patch
for(int blurRow = -BLUR_SIZE; blurRow < BLUR_SIZE+1; blurRow++){
for(int blurCol = -BLUR_SIZE; blurCol < BLUR_SIZE+1; blurCol++){
int curRow = Row + blurRow;
int curCol = Col + blurCol;
//verify we have a valid image pixel
if(curRow > -1 && curRow < height && curCol > -1 && curCol < width){
pixVal += Pin[curRow * width + curCol];
pixels++;
}
}
}
//write our new pixel value out
Pout[(Row * width + Col) * CHANNEL] = (unsigned char)(pixVal/pixels);
Pout[(Row * width + Col) * CHANNEL + 1] = (unsigned char)(pixVal/pixels);
Pout[(Row * width + Col) * CHANNEL + 2] = (unsigned char)(pixVal/pixels);
Pout[(Row * width + Col) * CHANNEL + 3] = 255;
}
}
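/*
 * Note: the kernel above reads Pin as a single-channel image (one byte per
 * pixel) and writes the averaged value into the R, G and B channels of the
 * RGBA output with alpha fixed at 255, i.e. the result is a grayscale blur
 * stored as a 4-channel image.
 */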
void blur(unsigned char *h_Pin, unsigned char *h_Pout, int m, int n) {
int size = (m*n*4)*sizeof(unsigned char);
unsigned char *d_Pin, *d_Pout;
//1. Allocate global memory on the device for d_Pin and d_Pout
	// With this type of allocation it isn't possible to access elements using higher-dimensional indexing syntax;
	// the indices need to be linearized first.
CHECK_ERROR(hipMalloc((void**)&d_Pin, size));
CHECK_ERROR(hipMalloc((void**)&d_Pout, size));
// copy h_Pin to device memory
hipMemcpy(d_Pin, h_Pin, size, hipMemcpyHostToDevice);
//2. Kernel launch code - with 256 threads per block
dim3 dimGrid(ceil(m / 16.0),ceil(n / 16.0), 1);
dim3 dimBlock(16.0, 16.0, 1);
hipLaunchKernelGGL(( blurKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Pin, d_Pout, m, n);
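	// optional: surface launch/runtime errors right away with the existing macro
	CHECK_ERROR(hipGetLastError());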
//3. copy d_Pout from the device memory
hipMemcpy(h_Pout, d_Pout, size, hipMemcpyDeviceToHost);
// Free device vectors
hipFree(d_Pin);
hipFree(d_Pout);
}
/*
Decode from disk to raw pixels
*/
unsigned char* decodeOneStep(const char* filename)
{
unsigned error;
unsigned char* image;
unsigned width, height;
error = lodepng_decode32_file(&image, &width, &height, filename);
if(error) printf("error %u: %s\n", error, lodepng_error_text(error));
return image;
}
/*
Encode from raw pixels to disk with a single function call
The image argument has width * height RGBA pixels or width * height * 4 bytes
*/
void encodeOneStep(const char* filename, unsigned char* image, int width, int height)
{
/*Encode the image*/
unsigned error = lodepng_encode32_file(filename, image, width, height);
/*if there's an error, display it*/
if(error) printf("error %u: %s\n", error, lodepng_error_text(error));
}
int main(int argc, char *argv[]) {
/* argv[1] must be the name of the image file */
if (argc != 2) {
printf("Usage: ./<executable_file>.x <name_of_image_file>\n");
exit(1);
}
const char *filename = argv[1];
// create host vectors
unsigned char *h_Pin, *h_Pout;
int m = 512; // track the pixel in x direction
int n = 512; // track the pixel in y direction
	// allocate memory for the output vector (h_Pin is allocated by the decoder below)
	h_Pout = (unsigned char*)malloc(sizeof(unsigned char)*(n*m*4));
// decode the .png image
printf("decoding image...\n");
h_Pin = decodeOneStep(filename);
printf("blurConversion...\n");
//GpuTimer timer;
//timer.Start();
blur(h_Pin, h_Pout, m, n);
//timer.Stop();
printf("encoding converted image...\n");
encodeOneStep("blurImage.png", h_Pout, m, n);
printf("ok conversion completed with success!\n");
// Free host memory
free(h_Pin);
free(h_Pout);
return 0;
}
| bd30981dc52452e8ab795adffea079f971cfb798.cu | /*
* EXAMPLE OF MAPPING THREADS TO MULTIDIMENSIONAL DATA (BLUR IMAGE): CHAPTER 3
*
 * WORK IN PROGRESS...
*
*/
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include"lodepng.h"
#define CHECK_ERROR(call) { \
cudaError_t err = call; \
if (err != cudaSuccess) { \
printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); \
exit(err); \
} \
}
#define BLUR_SIZE 1 // 3x3 patch --> patch side length = 2*BLUR_SIZE + 1 pixels
#define CHANNEL 4
__global__
void blurKernel(unsigned char *Pin, unsigned char *Pout, int width, int height) {
int Col = blockDim.x * blockIdx.x + threadIdx.x;
int Row = blockDim.y * blockIdx.y + threadIdx.y;
	// check that only the threads with both Row and Col values within range do the work
if ( Col < width && Row < height) {
int pixVal = 0;
int pixels = 0;
		// Get the average of the surrounding (2*BLUR_SIZE+1) x (2*BLUR_SIZE+1) box
		// for each pixel of the patch
for(int blurRow = -BLUR_SIZE; blurRow < BLUR_SIZE+1; blurRow++){
for(int blurCol = -BLUR_SIZE; blurCol < BLUR_SIZE+1; blurCol++){
int curRow = Row + blurRow;
int curCol = Col + blurCol;
//verify we have a valid image pixel
if(curRow > -1 && curRow < height && curCol > -1 && curCol < width){
pixVal += Pin[curRow * width + curCol];
pixels++;
}
}
}
//write our new pixel value out
Pout[(Row * width + Col) * CHANNEL] = (unsigned char)(pixVal/pixels);
Pout[(Row * width + Col) * CHANNEL + 1] = (unsigned char)(pixVal/pixels);
Pout[(Row * width + Col) * CHANNEL + 2] = (unsigned char)(pixVal/pixels);
Pout[(Row * width + Col) * CHANNEL + 3] = 255;
}
}
void blur(unsigned char *h_Pin, unsigned char *h_Pout, int m, int n) {
int size = (m*n*4)*sizeof(unsigned char);
unsigned char *d_Pin, *d_Pout;
//1. Allocate global memory on the device for d_Pin and d_Pout
	// With this type of allocation it isn't possible to access elements using higher-dimensional indexing syntax;
	// the indices need to be linearized first.
CHECK_ERROR(cudaMalloc((void**)&d_Pin, size));
CHECK_ERROR(cudaMalloc((void**)&d_Pout, size));
// copy h_Pin to device memory
cudaMemcpy(d_Pin, h_Pin, size, cudaMemcpyHostToDevice);
//2. Kernel launch code - with 256 threads per block
dim3 dimGrid(ceil(m / 16.0),ceil(n / 16.0), 1);
dim3 dimBlock(16.0, 16.0, 1);
blurKernel<<<dimGrid, dimBlock>>>(d_Pin, d_Pout, m, n);
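	// optional: surface launch/runtime errors right away with the existing macro
	CHECK_ERROR(cudaGetLastError());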
//3. copy d_Pout from the device memory
cudaMemcpy(h_Pout, d_Pout, size, cudaMemcpyDeviceToHost);
// Free device vectors
cudaFree(d_Pin);
cudaFree(d_Pout);
}
/*
Decode from disk to raw pixels
*/
unsigned char* decodeOneStep(const char* filename)
{
unsigned error;
unsigned char* image;
unsigned width, height;
error = lodepng_decode32_file(&image, &width, &height, filename);
if(error) printf("error %u: %s\n", error, lodepng_error_text(error));
return image;
}
/*
Encode from raw pixels to disk with a single function call
The image argument has width * height RGBA pixels or width * height * 4 bytes
*/
void encodeOneStep(const char* filename, unsigned char* image, int width, int height)
{
/*Encode the image*/
unsigned error = lodepng_encode32_file(filename, image, width, height);
/*if there's an error, display it*/
if(error) printf("error %u: %s\n", error, lodepng_error_text(error));
}
int main(int argc, char *argv[]) {
/* argv[1] must be the name of the image file */
if (argc != 2) {
printf("Usage: ./<executable_file>.x <name_of_image_file>\n");
exit(1);
}
const char *filename = argv[1];
// create host vectors
unsigned char *h_Pin, *h_Pout;
int m = 512; // track the pixel in x direction
int n = 512; // track the pixel in y direction
	// allocate memory for the output vector (h_Pin is allocated by the decoder below)
	h_Pout = (unsigned char*)malloc(sizeof(unsigned char)*(n*m*4));
// decode the .png image
printf("decoding image...\n");
h_Pin = decodeOneStep(filename);
printf("blurConversion...\n");
//GpuTimer timer;
//timer.Start();
blur(h_Pin, h_Pout, m, n);
//timer.Stop();
printf("encoding converted image...\n");
encodeOneStep("blurImage.png", h_Pout, m, n);
printf("ok conversion completed with success!\n");
// Free host memory
free(h_Pin);
free(h_Pout);
return 0;
}
|
4270bdcb95d79de0198dbe260772438a336b51db.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "util.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define H(a) (-a * log2f(a))
#define H2(a1, a2, p) (H(((float)(a1) + (p)) / ((float)(a1 + a2) + 1.0f)) + \
H(((float)(a2) + (1.0f - p)) / ((float)(a1 + a2) + 1.0f)))
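/* H(a) is the -a*log2(a) term of an entropy; H2(a1, a2, p) is the entropy of the
 * two-outcome distribution ((a1 + p) / (a1 + a2 + 1), (a2 + 1 - p) / (a1 + a2 + 1)),
 * i.e. the counts a1, a2 smoothed by the a-priori probability p.
 * Worked example: H2(1, 1, 0.5) = H(0.5) + H(0.5) = 1 bit. */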
/* Macros for summing a 2 x 3 x 3 array */
#define SUM_N3(a, n1, n2) (a[n1][n2][0] + a[n1][n2][1] + a[n1][n2][2])
#define SUM_N2(a, n1, n3) (a[n1][0][n3] + a[n1][1][n3] + a[n1][2][n3])
#define SUM_N1(a, n2, n3) (a[0][n2][n3] + a[1][n2][n3])
#define SUM_N2_N3(a, n1) (SUM_N3(a, n1, 0) + SUM_N3(a, n1, 1) + SUM_N3(a, n1, 2))
#define SUM_N1_N3(a, n2) (SUM_N3(a, 0, n2) + SUM_N3(a, 1, n2))
#define SUM_N1_N2(a, n3) (SUM_N2(a, 0, n3) + SUM_N2(a, 1, n3))
/* Data format:
 * - vector of values of the first descriptive variable *v1s, 1 variable, all objects
 * - vector of values of the second descriptive variable *v2s, 1 variable, all objects
 * - vector of decision variable values *ds
 * - number of objects num_objects
 */
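/* Sketch of what the function below computes: count[d][v1][v2] is a 2 x 3 x 3
 * contingency table over the decision d and the two (ternary) variables.
 * ig1 and ig2 are count-weighted information-gain terms for v1 and v2 alone,
 * ig12 the same for the pair, and the returned GIG is ig12 - max(ig1, ig2). */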
__device__ float compute_gig_1_2(int *v1s, int *v2s, int *ds, int num_objects, float p)
{
int count[2][3][3] = { 0 };
for (int i = 0; i < num_objects; ++i) {
int d = ds[i]; //(ds[i / 8] << (i % 8)) & 1;
int v1 = v1s[i]; //(vars[v1_p * num_objects + i / 4] << (i % 4)) & 3;
int v2 = v2s[i]; //(vars[v2_p * num_objects + i / 4] << (i % 4)) & 3;
count[d][v1][v2]++;
}
float ig1, ig2, ig12, h_p;
h_p = H2(SUM_N2_N3(count, 0), SUM_N2_N3(count, 1), p);
ig1 = h_p - SUM_N1_N3(count, 0) * H2(SUM_N3(count, 0, 0), SUM_N3(count, 1, 0), p) -
SUM_N1_N3(count, 1) * H2(SUM_N3(count, 0, 1), SUM_N3(count, 1, 1), p) -
SUM_N1_N3(count, 2) * H2(SUM_N3(count, 0, 2), SUM_N3(count, 1, 2), p);
ig2 = h_p - SUM_N1_N2(count, 0) * H2(SUM_N2(count, 0, 0), SUM_N2(count, 1, 0), p) -
SUM_N1_N2(count, 1) * H2(SUM_N2(count, 0, 1), SUM_N2(count, 1, 1), p) -
SUM_N1_N2(count, 2) * H2(SUM_N2(count, 0, 2), SUM_N2(count, 1, 2), p);
ig12 = h_p - SUM_N1(count, 0, 0) * H2(count[0][0][0], count[1][0][0], p) -
SUM_N1(count, 1, 0) * H2(count[0][1][0], count[1][1][0], p) -
SUM_N1(count, 2, 0) * H2(count[0][2][0], count[1][2][0], p) -
SUM_N1(count, 0, 1) * H2(count[0][0][1], count[1][0][1], p) -
SUM_N1(count, 1, 1) * H2(count[0][1][1], count[1][1][1], p) -
SUM_N1(count, 2, 1) * H2(count[0][2][1], count[1][2][1], p) -
SUM_N1(count, 0, 2) * H2(count[0][0][2], count[1][0][2], p) -
SUM_N1(count, 1, 2) * H2(count[0][1][2], count[1][1][2], p) -
SUM_N1(count, 2, 2) * H2(count[0][2][2], count[1][2][2], p);
//printf(" IG(v1) = %f\n", ig1);
//printf(" IG(v2) = %f\n", ig2);
//printf(" IG(v1 u v2) = %f\n", ig12);
return ig12 - ((ig1 > ig2) ? ig1 : ig2);
}
/* Data format:
 * - matrix of descriptive variable values *vars, 1 row = 1 variable
 * - vector of decision variable values *ds
 * - number of objects num_objects
 * - number of variables num_vars
 * - resulting GIG values
 */
__global__ void compute_gig_kernel(int *vars, int *ds, int num_objects, int num_vars, float *r_gig, float p)
{
int v1_p = blockIdx.x * blockDim.x + threadIdx.x;
int v2_p = blockIdx.y * blockDim.y + threadIdx.y;
if (v1_p >= v2_p) return;
if (v1_p >= num_vars) return;
if (v2_p >= num_vars) return;
//printf("compute_gig(%d, %d) %d\n", v1_p, v2_p, blockIdx.y);
r_gig[v1_p * num_vars + v2_p] = compute_gig_1_2(&vars[v1_p * num_objects], &vars[v2_p * num_objects], ds, num_objects, p);
//printf(" GIG = %f\n", r_gig[v1_p * num_vars + v2_p]);
}
struct GigStruct {
float gig;
int v1, v2;
};
__global__ void compute_gig_wt_kernel(int *vars, int *ds, int num_objects, int num_vars,
struct GigStruct *r_gig, int max_num_gig_structs, int* num_gig_structs,
float p, float threshold)
{
int v1_p = blockIdx.x * blockDim.x + threadIdx.x;
int v2_p = blockIdx.y * blockDim.y + threadIdx.y;
if (v1_p >= v2_p) return;
if (v1_p >= num_vars) return;
if (v2_p >= num_vars) return;
//printf("compute_gig(%d, %d) %d\n", v1_p, v2_p, blockIdx.y);
float gig = compute_gig_1_2(&vars[v1_p * num_objects], &vars[v2_p * num_objects], ds, num_objects, p);
if (gig < threshold) return;
/* atomicInc() wraps around to 0 */
int num = atomicAdd(num_gig_structs, 1);
if (num < max_num_gig_structs) {
r_gig[num].gig = gig;
r_gig[num].v1 = v1_p;
r_gig[num].v2 = v2_p;
}
//printf(" GIG = %f\n", r_gig[v1_p * num_vars + v2_p]);
}
/* Comparators for sorting in _descending_ order */
int compare_gig(const void *a, const void *b)
{
if (((struct GigStruct*)a)->gig > ((struct GigStruct*)b)->gig) return -1;
else if (((struct GigStruct*)a)->gig == ((struct GigStruct*)b)->gig) return 0;
else return 1;
}
int compare_float(const void *a, const void *b)
{
if (*((float*)a) > *((float*)b)) return -1;
else if (*((float*)a) == *((float*)b)) return 0;
else return 1;
}
int main()
{
int num_objects, num_vars, result_size, real_result_size;
float a_priori, threshold;
float input, copy, random_trial_kernel, random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all;
Timer timer;
timer.start();
scanf("%d %d %d %f", &num_objects, &num_vars, &result_size, &a_priori);
SyncArray2D<int> vars(num_vars, num_objects);
SyncArray<int> ds(num_objects);
	/* Read the data */
{
for (int i = 0; i < num_objects; ++i) {
scanf("%d", &ds.getHostEl(i));
for (int j = 0; j < num_vars; ++j)
scanf("%d", &vars.getHostEl(j, i));
}
input = timer.lap();
}
	/* Copy the data to the GPU */
{
vars.syncToDevice();
ds.syncToDevice();
copy = timer.lap();
}
	/* Run a randomized trial on the first 10% of the variables */
{
int random_trial_size = num_vars / 10;
if (random_trial_size > 8192)
random_trial_size = 8192;
float percent = (float)random_trial_size / (float)num_vars;
SyncArray2D<float> gig(random_trial_size, random_trial_size);
dim3 block_size(16, 16);
dim3 grid_size(padToMultipleOf(random_trial_size, block_size.x) / block_size.x,
padToMultipleOf(random_trial_size, block_size.y) / block_size.y);
hipLaunchKernelGGL(( compute_gig_kernel), dim3(grid_size), dim3(block_size), 0, 0, (int*)vars.getDevice(), (int*)ds.getDevice(),
num_objects, random_trial_size, (float*)gig.getDevice(), a_priori);
CUDA_CALL(hipGetLastError());
hipDeviceSynchronize();
random_trial_kernel = timer.lap();
gig.syncToHost();
random_trial_copy = timer.lap();
		/* Copy the computed GIG values into a contiguous block of memory,
		   sort them, and pick the appropriate element as the threshold */
{
int num_gig = 0;
float *gig_sorted = (float*)malloc(sizeof(float) * random_trial_size * random_trial_size);
for (int v1_p = 0; v1_p < random_trial_size; ++v1_p)
for (int v2_p = v1_p + 1; v2_p < random_trial_size; ++v2_p)
gig_sorted[num_gig++] = gig.getHostEl(v1_p, v2_p);
qsort(gig_sorted, num_gig, sizeof(float), compare_float);
			/* gig_sorted is sorted in descending order */
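			/* heuristic: the trial sees a fraction `percent` of the variables and thus
			   roughly percent^2 of all pairs, so this index approximates the cutoff
			   that keeps about result_size pairs over the full data */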
threshold = gig_sorted[(int)((float)result_size * percent * percent)];
free(gig_sorted);
}
random_trial_process = timer.lap();
}
	/* Run the final computation on all variables with a kernel
	   that only stores values greater than the threshold */
{
const int max_num_structs = result_size * 2;
SyncArray<struct GigStruct> gig_structs(max_num_structs);
SyncVar<int> num_structs;
dim3 block_size(16, 16);
dim3 grid_size(padToMultipleOf(num_vars, block_size.x) / block_size.x,
padToMultipleOf(num_vars, block_size.y) / block_size.y);
hipLaunchKernelGGL(( compute_gig_wt_kernel), dim3(grid_size), dim3(block_size), 0, 0, (int*)vars.getDevice(), (int*)ds.getDevice(),
num_objects, num_vars, (struct GigStruct*)gig_structs.getDevice(),
max_num_structs, num_structs.getDevice(), a_priori, threshold);
CUDA_CALL(hipGetLastError());
hipDeviceSynchronize();
main_kernel = timer.lap();
num_structs.syncToHost();
gig_structs.syncToHost();
main_copy = timer.lap();
		real_result_size = *num_structs.getHost();
		/* the kernel counts every pair above the threshold but only stores the
		   first max_num_structs of them, so clamp before sorting and printing */
		int num_stored = (real_result_size < max_num_structs) ? real_result_size : max_num_structs;
		qsort(gig_structs.getHost(), num_stored, sizeof(struct GigStruct), compare_gig);
		for (int i = num_stored - 1; i >= 0; --i)
printf("%f %d %d\n", gig_structs.getHostEl(i).gig, gig_structs.getHostEl(i).v1, gig_structs.getHostEl(i).v2);
main_process = timer.lap();
}
all = input + copy + random_trial_kernel + random_trial_copy + random_trial_process + main_kernel + main_copy + main_process;
fprintf(stderr, "data: variables, objects, result_size, true result size, threshold\n");
fprintf(stderr, "%d, %d, %d, %d, %f\n", num_vars, num_objects, result_size, real_result_size, threshold);
fprintf(stderr, "times: input, copy, random_trial_kernel, random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all\n");
fprintf(stderr, "%.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f\n", input, copy, random_trial_kernel,
random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all);
fprintf(stderr, "%.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f\n", input / all * 100.0f, copy / all * 100.0f,
random_trial_kernel / all * 100.0f, random_trial_copy / all * 100.0f, random_trial_process / all * 100.0f,
main_kernel / all * 100.0f, main_copy / all * 100.0f, main_process / all * 100.0f);
return 0;
}
| 4270bdcb95d79de0198dbe260772438a336b51db.cu | #include "util.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define H(a) (-a * log2f(a))
#define H2(a1, a2, p) (H(((float)(a1) + (p)) / ((float)(a1 + a2) + 1.0f)) + \
H(((float)(a2) + (1.0f - p)) / ((float)(a1 + a2) + 1.0f)))
/* Macros for summing a 2 x 3 x 3 array */
#define SUM_N3(a, n1, n2) (a[n1][n2][0] + a[n1][n2][1] + a[n1][n2][2])
#define SUM_N2(a, n1, n3) (a[n1][0][n3] + a[n1][1][n3] + a[n1][2][n3])
#define SUM_N1(a, n2, n3) (a[0][n2][n3] + a[1][n2][n3])
#define SUM_N2_N3(a, n1) (SUM_N3(a, n1, 0) + SUM_N3(a, n1, 1) + SUM_N3(a, n1, 2))
#define SUM_N1_N3(a, n2) (SUM_N3(a, 0, n2) + SUM_N3(a, 1, n2))
#define SUM_N1_N2(a, n3) (SUM_N2(a, 0, n3) + SUM_N2(a, 1, n3))
/* Data format:
 * - vector of values of the first descriptive variable *v1s, 1 variable, all objects
 * - vector of values of the second descriptive variable *v2s, 1 variable, all objects
 * - vector of decision variable values *ds
 * - number of objects num_objects
 */
__device__ float compute_gig_1_2(int *v1s, int *v2s, int *ds, int num_objects, float p)
{
int count[2][3][3] = { 0 };
for (int i = 0; i < num_objects; ++i) {
int d = ds[i]; //(ds[i / 8] << (i % 8)) & 1;
int v1 = v1s[i]; //(vars[v1_p * num_objects + i / 4] << (i % 4)) & 3;
int v2 = v2s[i]; //(vars[v2_p * num_objects + i / 4] << (i % 4)) & 3;
count[d][v1][v2]++;
}
float ig1, ig2, ig12, h_p;
h_p = H2(SUM_N2_N3(count, 0), SUM_N2_N3(count, 1), p);
ig1 = h_p - SUM_N1_N3(count, 0) * H2(SUM_N3(count, 0, 0), SUM_N3(count, 1, 0), p) -
SUM_N1_N3(count, 1) * H2(SUM_N3(count, 0, 1), SUM_N3(count, 1, 1), p) -
SUM_N1_N3(count, 2) * H2(SUM_N3(count, 0, 2), SUM_N3(count, 1, 2), p);
ig2 = h_p - SUM_N1_N2(count, 0) * H2(SUM_N2(count, 0, 0), SUM_N2(count, 1, 0), p) -
SUM_N1_N2(count, 1) * H2(SUM_N2(count, 0, 1), SUM_N2(count, 1, 1), p) -
SUM_N1_N2(count, 2) * H2(SUM_N2(count, 0, 2), SUM_N2(count, 1, 2), p);
ig12 = h_p - SUM_N1(count, 0, 0) * H2(count[0][0][0], count[1][0][0], p) -
SUM_N1(count, 1, 0) * H2(count[0][1][0], count[1][1][0], p) -
SUM_N1(count, 2, 0) * H2(count[0][2][0], count[1][2][0], p) -
SUM_N1(count, 0, 1) * H2(count[0][0][1], count[1][0][1], p) -
SUM_N1(count, 1, 1) * H2(count[0][1][1], count[1][1][1], p) -
SUM_N1(count, 2, 1) * H2(count[0][2][1], count[1][2][1], p) -
SUM_N1(count, 0, 2) * H2(count[0][0][2], count[1][0][2], p) -
SUM_N1(count, 1, 2) * H2(count[0][1][2], count[1][1][2], p) -
SUM_N1(count, 2, 2) * H2(count[0][2][2], count[1][2][2], p);
//printf(" IG(v1) = %f\n", ig1);
//printf(" IG(v2) = %f\n", ig2);
//printf(" IG(v1 u v2) = %f\n", ig12);
return ig12 - ((ig1 > ig2) ? ig1 : ig2);
}
/* Data format:
 * - matrix of descriptive variable values *vars, 1 row = 1 variable
 * - vector of decision variable values *ds
 * - number of objects num_objects
 * - number of variables num_vars
 * - resulting GIG values
 */
__global__ void compute_gig_kernel(int *vars, int *ds, int num_objects, int num_vars, float *r_gig, float p)
{
int v1_p = blockIdx.x * blockDim.x + threadIdx.x;
int v2_p = blockIdx.y * blockDim.y + threadIdx.y;
if (v1_p >= v2_p) return;
if (v1_p >= num_vars) return;
if (v2_p >= num_vars) return;
//printf("compute_gig(%d, %d) %d\n", v1_p, v2_p, blockIdx.y);
r_gig[v1_p * num_vars + v2_p] = compute_gig_1_2(&vars[v1_p * num_objects], &vars[v2_p * num_objects], ds, num_objects, p);
//printf(" GIG = %f\n", r_gig[v1_p * num_vars + v2_p]);
}
struct GigStruct {
float gig;
int v1, v2;
};
__global__ void compute_gig_wt_kernel(int *vars, int *ds, int num_objects, int num_vars,
struct GigStruct *r_gig, int max_num_gig_structs, int* num_gig_structs,
float p, float threshold)
{
int v1_p = blockIdx.x * blockDim.x + threadIdx.x;
int v2_p = blockIdx.y * blockDim.y + threadIdx.y;
if (v1_p >= v2_p) return;
if (v1_p >= num_vars) return;
if (v2_p >= num_vars) return;
//printf("compute_gig(%d, %d) %d\n", v1_p, v2_p, blockIdx.y);
float gig = compute_gig_1_2(&vars[v1_p * num_objects], &vars[v2_p * num_objects], ds, num_objects, p);
if (gig < threshold) return;
/* atomicInc() wraps around to 0 */
int num = atomicAdd(num_gig_structs, 1);
if (num < max_num_gig_structs) {
r_gig[num].gig = gig;
r_gig[num].v1 = v1_p;
r_gig[num].v2 = v2_p;
}
//printf(" GIG = %f\n", r_gig[v1_p * num_vars + v2_p]);
}
/* Comparators for sorting in _descending_ order */
int compare_gig(const void *a, const void *b)
{
if (((struct GigStruct*)a)->gig > ((struct GigStruct*)b)->gig) return -1;
else if (((struct GigStruct*)a)->gig == ((struct GigStruct*)b)->gig) return 0;
else return 1;
}
int compare_float(const void *a, const void *b)
{
if (*((float*)a) > *((float*)b)) return -1;
else if (*((float*)a) == *((float*)b)) return 0;
else return 1;
}
int main()
{
int num_objects, num_vars, result_size, real_result_size;
float a_priori, threshold;
float input, copy, random_trial_kernel, random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all;
Timer timer;
timer.start();
scanf("%d %d %d %f", &num_objects, &num_vars, &result_size, &a_priori);
SyncArray2D<int> vars(num_vars, num_objects);
SyncArray<int> ds(num_objects);
	/* Read the data */
{
for (int i = 0; i < num_objects; ++i) {
scanf("%d", &ds.getHostEl(i));
for (int j = 0; j < num_vars; ++j)
scanf("%d", &vars.getHostEl(j, i));
}
input = timer.lap();
}
	/* Copy the data to the GPU */
{
vars.syncToDevice();
ds.syncToDevice();
copy = timer.lap();
}
	/* Run a randomized trial on the first 10% of the variables */
{
int random_trial_size = num_vars / 10;
if (random_trial_size > 8192)
random_trial_size = 8192;
float percent = (float)random_trial_size / (float)num_vars;
SyncArray2D<float> gig(random_trial_size, random_trial_size);
dim3 block_size(16, 16);
dim3 grid_size(padToMultipleOf(random_trial_size, block_size.x) / block_size.x,
padToMultipleOf(random_trial_size, block_size.y) / block_size.y);
compute_gig_kernel<<<grid_size, block_size>>>((int*)vars.getDevice(), (int*)ds.getDevice(),
num_objects, random_trial_size, (float*)gig.getDevice(), a_priori);
CUDA_CALL(cudaGetLastError());
cudaDeviceSynchronize();
random_trial_kernel = timer.lap();
gig.syncToHost();
random_trial_copy = timer.lap();
		/* Copy the computed GIG values into a contiguous block of memory,
		   sort them, and pick the appropriate element as the threshold */
{
int num_gig = 0;
float *gig_sorted = (float*)malloc(sizeof(float) * random_trial_size * random_trial_size);
for (int v1_p = 0; v1_p < random_trial_size; ++v1_p)
for (int v2_p = v1_p + 1; v2_p < random_trial_size; ++v2_p)
gig_sorted[num_gig++] = gig.getHostEl(v1_p, v2_p);
qsort(gig_sorted, num_gig, sizeof(float), compare_float);
			/* gig_sorted is sorted in descending order */
threshold = gig_sorted[(int)((float)result_size * percent * percent)];
free(gig_sorted);
}
random_trial_process = timer.lap();
}
	/* Run the final computation on all variables with a kernel
	   that only stores values greater than the threshold */
{
const int max_num_structs = result_size * 2;
SyncArray<struct GigStruct> gig_structs(max_num_structs);
SyncVar<int> num_structs;
dim3 block_size(16, 16);
dim3 grid_size(padToMultipleOf(num_vars, block_size.x) / block_size.x,
padToMultipleOf(num_vars, block_size.y) / block_size.y);
compute_gig_wt_kernel<<<grid_size, block_size>>>((int*)vars.getDevice(), (int*)ds.getDevice(),
num_objects, num_vars, (struct GigStruct*)gig_structs.getDevice(),
max_num_structs, num_structs.getDevice(), a_priori, threshold);
CUDA_CALL(cudaGetLastError());
cudaDeviceSynchronize();
main_kernel = timer.lap();
num_structs.syncToHost();
gig_structs.syncToHost();
main_copy = timer.lap();
		real_result_size = *num_structs.getHost();
		/* the kernel counts every pair above the threshold but only stores the
		   first max_num_structs of them, so clamp before sorting and printing */
		int num_stored = (real_result_size < max_num_structs) ? real_result_size : max_num_structs;
		qsort(gig_structs.getHost(), num_stored, sizeof(struct GigStruct), compare_gig);
		for (int i = num_stored - 1; i >= 0; --i)
printf("%f %d %d\n", gig_structs.getHostEl(i).gig, gig_structs.getHostEl(i).v1, gig_structs.getHostEl(i).v2);
main_process = timer.lap();
}
all = input + copy + random_trial_kernel + random_trial_copy + random_trial_process + main_kernel + main_copy + main_process;
fprintf(stderr, "data: variables, objects, result_size, true result size, threshold\n");
fprintf(stderr, "%d, %d, %d, %d, %f\n", num_vars, num_objects, result_size, real_result_size, threshold);
fprintf(stderr, "times: input, copy, random_trial_kernel, random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all\n");
fprintf(stderr, "%.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f\n", input, copy, random_trial_kernel,
random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all);
fprintf(stderr, "%.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f\n", input / all * 100.0f, copy / all * 100.0f,
random_trial_kernel / all * 100.0f, random_trial_copy / all * 100.0f, random_trial_process / all * 100.0f,
main_kernel / all * 100.0f, main_copy / all * 100.0f, main_process / all * 100.0f);
return 0;
}
|
01c313c4cb8e7bb092075b32d608bf53bc93069b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _OPTIMISATION_KERNEL_Z_H_
#define _OPTIMISATION_KERNEL_Z_H_
#include "GpGpu/GpGpu_StreamData.cuh"
#include "GpGpu/SData2Optimize.h"
// One could imagine a buffer of sizes computed in parallel:
// SIZEBUFFER[threadIdx.x] = count(lI[threadIdx.x]);
__device__ void GetConeZ(short2 & aDz, int aZ, int MaxDeltaZ, short2 aZ_Next, short2 aZ_Prev)
{
aDz.x = aZ_Prev.x-aZ;
if (aZ != aZ_Next.x)
aDz.x = max(aDz.x,-MaxDeltaZ);
aDz.y = aZ_Prev.y-1-aZ;
if (aZ != aZ_Next.y-1)
aDz.y = min(aDz.y,MaxDeltaZ);
if (aDz.x > aDz.y)
if (aDz.y <0)
aDz.x = aDz.y;
else
aDz.y = aDz.x;
}
__device__ void BasicComputeIntervaleDelta
(
short2 & aDz,
int aZ,
int MaxDeltaZ,
short2 aZ_Prev
)
{
aDz.x = max(-MaxDeltaZ,aZ_Prev.x-aZ);
aDz.y = min(MaxDeltaZ,aZ_Prev.y-1-aZ);
}
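/* Warp-level tree reduction over the shared array sMin: it apparently assumes
 * blockDim.x == WARPSIZE and relies on warp-synchronous execution (hence no
 * __syncthreads()). Returns the minimum of sMin and also folds it into globalMin. */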
inline __device__ uint minR(uint *sMin, uint &globalMin){ // TODO note: inline added
ushort thread2;
uint temp;
//
int nTotalThreads = WARPSIZE; // Total number of threads, rounded up to the next power of two
while(nTotalThreads > 1)
{
int halfPoint = (nTotalThreads >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint)
{
thread2 = threadIdx.x + halfPoint;
			// Skipping the fictitious threads blockDim.x ... blockDim_2-1
if (thread2 < blockDim.x)
{
// Get the shared value stored by another thread
temp = sMin[thread2];
if (temp < sMin[threadIdx.x])
sMin[threadIdx.x] = temp;
}
}
// Reducing the binary tree size by two:
nTotalThreads = halfPoint;
}
const uint minus = sMin[0];
if(minus < globalMin) globalMin = minus;
return minus;
}
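/* Compile-time direction dispatch: the boolean template parameter `sens` encodes
 * the sweep direction; __choose<true> returns the forward ("avant") value,
 * __choose<false> the backward ("arriere") one, and __delta offsets the
 * per-thread index for the backward pass. */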
template<bool sens> __device__
inline uint __choose(uint kav,uint kar)
{
return 0;
}
template<> __device__
inline uint __choose<true>(uint kav,uint kar)
{
return kav;
}
template<> __device__
inline uint __choose<false>(uint kav,uint kar)
{
return kar;
}
template<bool sens> __device__
inline ushort __choose(ushort kav,ushort kar)
{
return 0;
}
template<> __device__
inline ushort __choose<true>(ushort kav,ushort kar)
{
return kav;
}
template<> __device__
inline ushort __choose<false>(ushort kav,ushort kar)
{
return kar;
}
template<bool sens> __device__
inline short __choose(short kav,short kar)
{
return 0;
}
template<> __device__
inline short __choose<true>(short kav,short kar)
{
return kav;
}
template<> __device__
inline short __choose<false>(short kav,short kar)
{
return kar;
}
template<bool autoMask> __device__
inline void getIntervale(short2 & aDz, int aZ, int MaxDeltaZ, short2 aZ_Next, short2 aZ_Prev){}
template<> __device__
inline void getIntervale<true>(short2 & aDz, int aZ, int MaxDeltaZ, short2 aZ_Next, short2 aZ_Prev)
{
BasicComputeIntervaleDelta(aDz,aZ,MaxDeltaZ,aZ_Prev);
}
template<> __device__
inline void getIntervale<false>(short2 & aDz, int aZ, int MaxDeltaZ, short2 aZ_Next, short2 aZ_Prev)
{
GetConeZ(aDz,aZ,MaxDeltaZ,aZ_Next,aZ_Prev);
}
template<bool autoMask> __device__
inline uint getCostInit(uint maskCost,uint costInit,bool mask){return 0;}
template<> __device__
inline uint getCostInit<true>(uint maskCost,uint costInit,bool mask)
{
return mask ? maskCost : costInit;
}
template<> __device__
inline uint getCostInit<false>(uint maskCost,uint costInit,bool mask)
{
return costInit;
}
template<bool autoMask> __device__
inline void connectMask(uint &costMin,uint costInit, uint prevDefCor, ushort costTransDefMask,bool mask){}
template<> __device__
inline void connectMask<true>(uint &costMin,uint costInit, uint prevDefCor, ushort costTransDefMask,bool mask)
{
if(!mask)
costMin = min(costMin, costInit + prevDefCor + costTransDefMask );
}
template<bool sens> __device__
inline short __delta()
{
return 0;
}
template<> __device__
inline short __delta<true>()
{
return 0;
}
template<> __device__
inline short __delta<false>()
{
return -WARPSIZE + 1;
}
template<bool sens,bool hasMask> __device__
void connectCellsLine(
SimpleStream<short3> &streamIndex,
SimpleStream<uint> &streamFCost,
SimpleStream<ushort> &streamICost,
SimpleStream<uint> &streamDefCor,
short3 *S_Bf_Index,
ushort *ST_Bf_ICost,
uint *S_FCost[2],
p_ReadLine &p
)
{
short3* ST_Bf_Index = S_Bf_Index + p.tid + __delta<sens>();
__shared__ uint minCost[WARPSIZE];
short2 ConeZ;
uint globMinFCost;
bool lined = p.line.id < p.line.lenght;
const int regulZ = (int)((float)10000.f*p.ZRegul);
    // Note:
    // p.seg.id = 1 on the first pass, since it is just a copy of the init costs
    //////////////////////////////////////////////////
    /// TODO!!!! : what should prevDefCor be: p.costTransDefMask + p.costDefMask or p.costDefMask?
    /////////////////////////////////////////////////
    uint prevDefCor =/* p.costTransDefMask + */p.prevDefCor; // TODO check which value to use!!!
const ushort idGline = p.line.id + p.seg.id;
streamDefCor.SetOrAddValue<sens>(__choose<sens>((uint)idGline, p.line.lenght - idGline),prevDefCor);
    uint prevMinCostCells = 0; // TODO this value still needs to be determined
uint prevMinCost = 0;
while(lined)
{
while(p.seg.id < p.seg.lenght)
{
const short3 dTer = S_Bf_Index[sgn(p.seg.id)];
const short2 indexZ = make_short2(dTer.x,dTer.y);
const ushort cDefCor = dTer.z;
const bool maskTer = cDefCor == 0;
            const ushort dZ = count(indexZ); // create a buffer of counts
ushort z = 0;
globMinFCost = max_cost;
while( z < dZ)
{
            // Read from the stream if the buffer is empty | TODO check whether > or >=
if(p.ID_Bf_Icost >= p.sizeBuffer)
{
                streamICost.read<sens>(ST_Bf_ICost); // read the correlation costs
                streamFCost.incre<sens>(); // point to the output
                p.ID_Bf_Icost = 0; // point to the first value of the correlation cost buffer
}
uint fCostMin = max_cost;
uint costInit = getCostInit<hasMask>(500000,ST_Bf_ICost[sgn(p.ID_Bf_Icost)],maskTer);
const ushort tZ = z + p.stid<sens>();
const short Z = __choose<sens>((short)(tZ + indexZ.x),(short)(indexZ.y - tZ - 1));
const short pitPrZ = __choose<sens>((short)(Z - p.prev_Dz.x ), (short)(p.prev_Dz.y - Z - 1));
getIntervale<hasMask>(ConeZ,Z,p.pente,indexZ,p.prev_Dz);
uint* prevFCost = S_FCost[p.Id_Buf] + sgn(pitPrZ);
ConeZ.y = min(p.sizeBuffer - pitPrZ,ConeZ.y );
for (short i = ConeZ.x; i <= ConeZ.y; ++i) //--> TODO this step is not necessary when we are outside the terrain mask
fCostMin = min(fCostMin, costInit + prevFCost[i] + abs((int)i)*regulZ);
connectMask<hasMask>(fCostMin,costInit,prevDefCor,p.costTransDefMask,maskTer);
if(tZ < dZ && p.ID_Bf_Icost + p.stid<sens>() < p.sizeBuffer && tZ < p.sizeBuffer)
{
fCostMin -= prevMinCost;
minCost[p.tid] = fCostMin;
S_FCost[!p.Id_Buf][sgn(tZ)] = fCostMin;
streamFCost.SetOrAddValue<sens>(sgn(p.ID_Bf_Icost),fCostMin,fCostMin - costInit);
}
else
minCost[p.tid] = max_cost;
minR(minCost,globMinFCost); // TODO check this function, it may be launched too many times..... careful, made inline for now
const ushort pIdCost = p.ID_Bf_Icost;
p.ID_Bf_Icost += min(dZ - z , WARPSIZE);
z += min(p.sizeBuffer-pIdCost , WARPSIZE);
}
if(hasMask)
{
uint defCor = prevDefCor + cDefCor;
if(p.prevDefCor != 0)
defCor = min(defCor,cDefCor + prevMinCostCells + p.costTransDefMask);
prevDefCor = defCor - prevMinCost;
prevMinCostCells = globMinFCost;
prevMinCost = min(globMinFCost,prevDefCor);
p.prevDefCor = cDefCor;
if(p.tid == 0)
{
const ushort idGline = p.line.id + p.seg.id;
streamDefCor.SetOrAddValue<sens>(__choose<sens>((uint)idGline , p.line.lenght - idGline),prevDefCor,prevDefCor-cDefCor);
}
}
else
prevMinCost = globMinFCost;
p.prev_Dz = indexZ;
p.seg.id++;
p.swBuf();
}
p.line.id += p.seg.lenght;
lined = p.line.id < p.line.lenght;
if(lined)
{
streamIndex.read<sens>(ST_Bf_Index);
p.seg.lenght = min(p.line.LOver(),WARPSIZE);
p.seg.id = 0; // position within the segment of the Z index stream
}
}
}
// TODO pass the parameters as constant variables !!!!!!!!!!!
template<class T> __global__
void Kernel_OptimisationOneDirection(ushort* g_ICost, short3* g_Index, uint* g_FCost, uint* g_DefCor, uint3* g_RecStrParam, ushort penteMax, float zReg,float zRegQuad, ushort costDefMask,ushort costTransDefMask,ushort sizeBuffer,bool hasMaskauto)
{
extern __shared__ float sharedMemory[];
ushort* S_BuffICost0 = (ushort*) sharedMemory;
uint* S_BuffFCost0 = (uint*) &S_BuffICost0[sizeBuffer + 2*WARPSIZE];
uint* S_BuffFCost1 = (uint*) &S_BuffFCost0[sizeBuffer + 2*WARPSIZE];
short3* S_BuffIndex = (short3*) &S_BuffFCost1[sizeBuffer + 2*WARPSIZE];
uint* pit_Id = (uint*) &S_BuffIndex[WARPSIZE];
uint* pit_Stream = pit_Id + 1;
p_ReadLine p(threadIdx.x,penteMax,zReg,zRegQuad,costDefMask,costTransDefMask,sizeBuffer,hasMaskauto);
uint* S_BuffFCost[2] = {S_BuffFCost0 + WARPSIZE,S_BuffFCost1 + WARPSIZE};
ushort* S_BuffICost = S_BuffICost0 + WARPSIZE + p.tid;
if(!threadIdx.x)
{
*pit_Stream = g_RecStrParam[blockIdx.x].x;
*pit_Id = g_RecStrParam[blockIdx.x].y;
}
__syncthreads();
p.line.lenght = g_RecStrParam[blockIdx.x].z;
p.seg.lenght = min(p.line.LOver(),WARPSIZE);
SimpleStream<ushort> streamICost( g_ICost + *pit_Stream ,sizeBuffer);
SimpleStream<uint> streamFCost( g_FCost + *pit_Stream ,sizeBuffer);
SimpleStream<short3> streamIndex( g_Index + *pit_Id ,WARPSIZE);
SimpleStream<uint> streamDefCor( g_DefCor + *pit_Id ,WARPSIZE);
if(p.tid == 0)
streamDefCor.SetValue(0,0); // because the first line is not computed
// Careful: check the backward pass
streamICost.read<eAVANT>(S_BuffICost);
streamIndex.read<eAVANT>(S_BuffIndex + p.tid);
p.prev_Dz = make_short2(S_BuffIndex[0].x,S_BuffIndex[0].y);
p.prevDefCor = S_BuffIndex[0].z;
p.ID_Bf_Icost = count(p.prev_Dz);
for (ushort i = 0; i < p.ID_Bf_Icost - p.tid; i+=WARPSIZE)
{
S_BuffFCost[p.Id_Buf][i + p.tid] = S_BuffICost[i];
streamFCost.SetValue(i,S_BuffICost[i]);
}
connectCellsLine<eAVANT,true>(streamIndex,streamFCost,streamICost,streamDefCor,S_BuffIndex,S_BuffICost,S_BuffFCost,p);
streamIndex.ReverseIncre<eARRIERE>();
streamFCost.incre<eAVANT>();
streamFCost.reverse<eARRIERE>();
S_BuffFCost[0] += sizeBuffer;
S_BuffFCost[1] += sizeBuffer;
S_BuffICost += sizeBuffer - WARPSIZE;
streamICost.readFrom<eARRIERE>(S_BuffFCost[p.Id_Buf] + p.tid, sizeBuffer - p.ID_Bf_Icost);
streamICost.ReverseIncre<eARRIERE>();
p.reverse(S_BuffIndex,sizeBuffer);
if(p.ID_Bf_Icost > sizeBuffer)
{
p.ID_Bf_Icost -= sizeBuffer;
streamICost.read<eARRIERE>(S_BuffICost);
streamFCost.incre<eARRIERE>();
}
uint* locFCost = S_BuffFCost[p.Id_Buf] - p.stid<eARRIERE>();
for (ushort i = 0; i < sizeBuffer; i+=WARPSIZE)
locFCost[-i] = S_BuffICost[-i];
connectCellsLine<eARRIERE,true>( streamIndex,streamFCost,streamICost,streamDefCor,S_BuffIndex + WARPSIZE - 1,S_BuffICost,S_BuffFCost,p);
}
extern "C" void Gpu_OptimisationOneDirection(Data2Optimiz<CuDeviceData3D> &d2O)
{
ushort deltaMax = d2O.penteMax();
float zReg = (float)d2O.zReg();
float zRegQuad = d2O.zRegQuad();
ushort costDefMask = d2O.CostDefMasked();
ushort costTransDefMask = d2O.CostTransMaskNoMask();
bool hasMaskauto = d2O.hasMaskAuto();
dim3 Threads(WARPSIZE,1,1);
dim3 Blocks(d2O.NBlines(),1,1);
ushort sizeBuff = min(d2O.DzMax(),4096); //NAPPEMAX;
ushort cacheLin = sizeBuff + 2 * WARPSIZE;
// Compute the size of the dynamic shared-memory allocation
uint sizeSharedMemory =
cacheLin * sizeof(ushort) + // S_BuffICost0
cacheLin * sizeof(uint) + // S_BuffFCost0
cacheLin * sizeof(uint) + // S_BuffFCost1
WARPSIZE * sizeof(short3) + // S_BuffIndex
// WARPSIZE * sizeof(uint) + // S_BuffDefCor
sizeof(uint) + // pit_Id
sizeof(uint); // pit_Stream
hipLaunchKernelGGL(( Kernel_OptimisationOneDirection< uint >), dim3(Blocks),dim3(Threads),sizeSharedMemory, 0,
d2O.pInitCost(),
d2O.pIndex(),
d2O.pForceCostVol(),
d2O.pDefCor(),
d2O.pParam(),
deltaMax,
zReg,
zRegQuad,
costDefMask,
costTransDefMask,
sizeBuff,
hasMaskauto
);
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
printf("Error CUDA Gpu_OptimisationOneDirection\n");
printf("%s",hipGetErrorString(err));
DUMP(d2O.NBlines());
DUMP(sizeSharedMemory);
DUMP(d2O.DzMax());
}
getLastCudaError("TestkernelOptiOneDirection failed");
}
#endif //_OPTIMISATION_KERNEL_Z_H_
| 01c313c4cb8e7bb092075b32d608bf53bc93069b.cu | #ifndef _OPTIMISATION_KERNEL_Z_H_
#define _OPTIMISATION_KERNEL_Z_H_
#include "GpGpu/GpGpu_StreamData.cuh"
#include "GpGpu/SData2Optimize.h"
// One could imagine a buffer of sizes computed in parallel
// SIZEBUFFER[threadIdx.x] = count(lI[threadIdx.x]);
__device__ void GetConeZ(short2 & aDz, int aZ, int MaxDeltaZ, short2 aZ_Next, short2 aZ_Prev)
{
aDz.x = aZ_Prev.x-aZ;
if (aZ != aZ_Next.x)
aDz.x = max(aDz.x,-MaxDeltaZ);
aDz.y = aZ_Prev.y-1-aZ;
if (aZ != aZ_Next.y-1)
aDz.y = min(aDz.y,MaxDeltaZ);
if (aDz.x > aDz.y)
if (aDz.y <0)
aDz.x = aDz.y;
else
aDz.y = aDz.x;
}
__device__ void BasicComputeIntervaleDelta
(
short2 & aDz,
int aZ,
int MaxDeltaZ,
short2 aZ_Prev
)
{
aDz.x = max(-MaxDeltaZ,aZ_Prev.x-aZ);
aDz.y = min(MaxDeltaZ,aZ_Prev.y-1-aZ);
}
inline __device__ uint minR(uint *sMin, uint &globalMin){ // TODO careful: made inline
ushort thread2;
uint temp;
//
int nTotalThreads = WARPSIZE; // Total number of threads, rounded up to the next power of two
while(nTotalThreads > 1)
{
int halfPoint = (nTotalThreads >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x < halfPoint)
{
thread2 = threadIdx.x + halfPoint;
// Skipping the fictious threads blockDim.x ... blockDim_2-1
if (thread2 < blockDim.x)
{
// Get the shared value stored by another thread
temp = sMin[thread2];
if (temp < sMin[threadIdx.x])
sMin[threadIdx.x] = temp;
}
}
// Reducing the binary tree size by two:
nTotalThreads = halfPoint;
}
const uint minus = sMin[0];
if(minus < globalMin) globalMin = minus;
return minus;
}
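// Editor's sketch (not part of the original source): minR above performs a
// shared-memory tree reduction over one warp. On CUDA 9+ the same warp-wide
// minimum can be computed with shuffle intrinsics and no shared memory.
// This assumes blockDim.x == WARPSIZE (32) and full-warp participation; the
// helper name warpMinShuffle is made up for illustration and is not used
// elsewhere in this file.
__device__ inline uint warpMinShuffle(uint v)
{
    for (int offset = WARPSIZE / 2; offset > 0; offset >>= 1)
        v = min(v, __shfl_down_sync(0xffffffffu, v, offset));
    return v;
}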
template<bool sens> __device__
inline uint __choose(uint kav,uint kar)
{
return 0;
}
template<> __device__
inline uint __choose<true>(uint kav,uint kar)
{
return kav;
}
template<> __device__
inline uint __choose<false>(uint kav,uint kar)
{
return kar;
}
template<bool sens> __device__
inline ushort __choose(ushort kav,ushort kar)
{
return 0;
}
template<> __device__
inline ushort __choose<true>(ushort kav,ushort kar)
{
return kav;
}
template<> __device__
inline ushort __choose<false>(ushort kav,ushort kar)
{
return kar;
}
template<bool sens> __device__
inline short __choose(short kav,short kar)
{
return 0;
}
template<> __device__
inline short __choose<true>(short kav,short kar)
{
return kav;
}
template<> __device__
inline short __choose<false>(short kav,short kar)
{
return kar;
}
template<bool autoMask> __device__
inline void getIntervale(short2 & aDz, int aZ, int MaxDeltaZ, short2 aZ_Next, short2 aZ_Prev){}
template<> __device__
inline void getIntervale<true>(short2 & aDz, int aZ, int MaxDeltaZ, short2 aZ_Next, short2 aZ_Prev)
{
BasicComputeIntervaleDelta(aDz,aZ,MaxDeltaZ,aZ_Prev);
}
template<> __device__
inline void getIntervale<false>(short2 & aDz, int aZ, int MaxDeltaZ, short2 aZ_Next, short2 aZ_Prev)
{
GetConeZ(aDz,aZ,MaxDeltaZ,aZ_Next,aZ_Prev);
}
template<bool autoMask> __device__
inline uint getCostInit(uint maskCost,uint costInit,bool mask){return 0;}
template<> __device__
inline uint getCostInit<true>(uint maskCost,uint costInit,bool mask)
{
return mask ? maskCost : costInit;
}
template<> __device__
inline uint getCostInit<false>(uint maskCost,uint costInit,bool mask)
{
return costInit;
}
template<bool autoMask> __device__
inline void connectMask(uint &costMin,uint costInit, uint prevDefCor, ushort costTransDefMask,bool mask){}
template<> __device__
inline void connectMask<true>(uint &costMin,uint costInit, uint prevDefCor, ushort costTransDefMask,bool mask)
{
if(!mask)
costMin = min(costMin, costInit + prevDefCor + costTransDefMask );
}
template<bool sens> __device__
inline short __delta()
{
return 0;
}
template<> __device__
inline short __delta<true>()
{
return 0;
}
template<> __device__
inline short __delta<false>()
{
return -WARPSIZE + 1;
}
template<bool sens,bool hasMask> __device__
void connectCellsLine(
SimpleStream<short3> &streamIndex,
SimpleStream<uint> &streamFCost,
SimpleStream<ushort> &streamICost,
SimpleStream<uint> &streamDefCor,
short3 *S_Bf_Index,
ushort *ST_Bf_ICost,
uint *S_FCost[2],
p_ReadLine &p
)
{
short3* ST_Bf_Index = S_Bf_Index + p.tid + __delta<sens>();
__shared__ uint minCost[WARPSIZE];
short2 ConeZ;
uint globMinFCost;
bool lined = p.line.id < p.line.lenght;
const int regulZ = (int)((float)10000.f*p.ZRegul);
// Note
// p.seg.id = 1 on the first pass, since it is a plain copy of the init costs
//////////////////////////////////////////////////
/// TODO!!!! : should prevDefCor be p.costTransDefMask + p.costDefMask or just p.costDefMask?
/////////////////////////////////////////////////
uint prevDefCor =/* p.costTransDefMask + */p.prevDefCor; // TODO decide which value to use!!!
const ushort idGline = p.line.id + p.seg.id;
streamDefCor.SetOrAddValue<sens>(__choose<sens>((uint)idGline, p.line.lenght - idGline),prevDefCor);
uint prevMinCostCells = 0; // TODO this value still needs to be determined
uint prevMinCost = 0;
while(lined)
{
while(p.seg.id < p.seg.lenght)
{
const short3 dTer = S_Bf_Index[sgn(p.seg.id)];
const short2 indexZ = make_short2(dTer.x,dTer.y);
const ushort cDefCor = dTer.z;
const bool maskTer = cDefCor == 0;
const ushort dZ = count(indexZ); // creer buffer de count
ushort z = 0;
globMinFCost = max_cost;
while( z < dZ)
{
// Read from the stream if the buffer is empty | TODO check whether > or >=
if(p.ID_Bf_Icost >= p.sizeBuffer)
{
streamICost.read<sens>(ST_Bf_ICost); // Read the correlation costs
streamFCost.incre<sens>(); // Point to the output
p.ID_Bf_Icost = 0; // Point to the first value of the correlation-cost buffer
}
uint fCostMin = max_cost;
uint costInit = getCostInit<hasMask>(500000,ST_Bf_ICost[sgn(p.ID_Bf_Icost)],maskTer);
const ushort tZ = z + p.stid<sens>();
const short Z = __choose<sens>((short)(tZ + indexZ.x),(short)(indexZ.y - tZ - 1));
const short pitPrZ = __choose<sens>((short)(Z - p.prev_Dz.x ), (short)(p.prev_Dz.y - Z - 1));
getIntervale<hasMask>(ConeZ,Z,p.pente,indexZ,p.prev_Dz);
uint* prevFCost = S_FCost[p.Id_Buf] + sgn(pitPrZ);
ConeZ.y = min(p.sizeBuffer - pitPrZ,ConeZ.y );
for (short i = ConeZ.x; i <= ConeZ.y; ++i) //--> TODO this step is not necessary when we are outside the terrain mask
fCostMin = min(fCostMin, costInit + prevFCost[i] + abs((int)i)*regulZ);
connectMask<hasMask>(fCostMin,costInit,prevDefCor,p.costTransDefMask,maskTer);
if(tZ < dZ && p.ID_Bf_Icost + p.stid<sens>() < p.sizeBuffer && tZ < p.sizeBuffer)
{
fCostMin -= prevMinCost;
minCost[p.tid] = fCostMin;
S_FCost[!p.Id_Buf][sgn(tZ)] = fCostMin;
streamFCost.SetOrAddValue<sens>(sgn(p.ID_Bf_Icost),fCostMin,fCostMin - costInit);
}
else
minCost[p.tid] = max_cost;
minR(minCost,globMinFCost); // TODO check this function, it may be launched too many times..... careful, made inline for now
const ushort pIdCost = p.ID_Bf_Icost;
p.ID_Bf_Icost += min(dZ - z , WARPSIZE);
z += min(p.sizeBuffer-pIdCost , WARPSIZE);
}
if(hasMask)
{
uint defCor = prevDefCor + cDefCor;
if(p.prevDefCor != 0)
defCor = min(defCor,cDefCor + prevMinCostCells + p.costTransDefMask);
prevDefCor = defCor - prevMinCost;
prevMinCostCells = globMinFCost;
prevMinCost = min(globMinFCost,prevDefCor);
p.prevDefCor = cDefCor;
if(p.tid == 0)
{
const ushort idGline = p.line.id + p.seg.id;
streamDefCor.SetOrAddValue<sens>(__choose<sens>((uint)idGline , p.line.lenght - idGline),prevDefCor,prevDefCor-cDefCor);
}
}
else
prevMinCost = globMinFCost;
p.prev_Dz = indexZ;
p.seg.id++;
p.swBuf();
}
p.line.id += p.seg.lenght;
lined = p.line.id < p.line.lenght;
if(lined)
{
streamIndex.read<sens>(ST_Bf_Index);
p.seg.lenght = min(p.line.LOver(),WARPSIZE);
p.seg.id = 0; // position within the segment of the Z index stream
}
}
}
// TODO pass the parameters as constant variables !!!!!!!!!!!
template<class T> __global__
void Kernel_OptimisationOneDirection(ushort* g_ICost, short3* g_Index, uint* g_FCost, uint* g_DefCor, uint3* g_RecStrParam, ushort penteMax, float zReg,float zRegQuad, ushort costDefMask,ushort costTransDefMask,ushort sizeBuffer,bool hasMaskauto)
{
extern __shared__ float sharedMemory[];
ushort* S_BuffICost0 = (ushort*) sharedMemory;
uint* S_BuffFCost0 = (uint*) &S_BuffICost0[sizeBuffer + 2*WARPSIZE];
uint* S_BuffFCost1 = (uint*) &S_BuffFCost0[sizeBuffer + 2*WARPSIZE];
short3* S_BuffIndex = (short3*) &S_BuffFCost1[sizeBuffer + 2*WARPSIZE];
uint* pit_Id = (uint*) &S_BuffIndex[WARPSIZE];
uint* pit_Stream = pit_Id + 1;
p_ReadLine p(threadIdx.x,penteMax,zReg,zRegQuad,costDefMask,costTransDefMask,sizeBuffer,hasMaskauto);
uint* S_BuffFCost[2] = {S_BuffFCost0 + WARPSIZE,S_BuffFCost1 + WARPSIZE};
ushort* S_BuffICost = S_BuffICost0 + WARPSIZE + p.tid;
if(!threadIdx.x)
{
*pit_Stream = g_RecStrParam[blockIdx.x].x;
*pit_Id = g_RecStrParam[blockIdx.x].y;
}
__syncthreads();
p.line.lenght = g_RecStrParam[blockIdx.x].z;
p.seg.lenght = min(p.line.LOver(),WARPSIZE);
SimpleStream<ushort> streamICost( g_ICost + *pit_Stream ,sizeBuffer);
SimpleStream<uint> streamFCost( g_FCost + *pit_Stream ,sizeBuffer);
SimpleStream<short3> streamIndex( g_Index + *pit_Id ,WARPSIZE);
SimpleStream<uint> streamDefCor( g_DefCor + *pit_Id ,WARPSIZE);
if(p.tid == 0)
streamDefCor.SetValue(0,0); // because the first line is not computed
// Careful: check the backward pass
streamICost.read<eAVANT>(S_BuffICost);
streamIndex.read<eAVANT>(S_BuffIndex + p.tid);
p.prev_Dz = make_short2(S_BuffIndex[0].x,S_BuffIndex[0].y);
p.prevDefCor = S_BuffIndex[0].z;
p.ID_Bf_Icost = count(p.prev_Dz);
for (ushort i = 0; i < p.ID_Bf_Icost - p.tid; i+=WARPSIZE)
{
S_BuffFCost[p.Id_Buf][i + p.tid] = S_BuffICost[i];
streamFCost.SetValue(i,S_BuffICost[i]);
}
connectCellsLine<eAVANT,true>(streamIndex,streamFCost,streamICost,streamDefCor,S_BuffIndex,S_BuffICost,S_BuffFCost,p);
streamIndex.ReverseIncre<eARRIERE>();
streamFCost.incre<eAVANT>();
streamFCost.reverse<eARRIERE>();
S_BuffFCost[0] += sizeBuffer;
S_BuffFCost[1] += sizeBuffer;
S_BuffICost += sizeBuffer - WARPSIZE;
streamICost.readFrom<eARRIERE>(S_BuffFCost[p.Id_Buf] + p.tid, sizeBuffer - p.ID_Bf_Icost);
streamICost.ReverseIncre<eARRIERE>();
p.reverse(S_BuffIndex,sizeBuffer);
if(p.ID_Bf_Icost > sizeBuffer)
{
p.ID_Bf_Icost -= sizeBuffer;
streamICost.read<eARRIERE>(S_BuffICost);
streamFCost.incre<eARRIERE>();
}
uint* locFCost = S_BuffFCost[p.Id_Buf] - p.stid<eARRIERE>();
for (ushort i = 0; i < sizeBuffer; i+=WARPSIZE)
locFCost[-i] = S_BuffICost[-i];
connectCellsLine<eARRIERE,true>( streamIndex,streamFCost,streamICost,streamDefCor,S_BuffIndex + WARPSIZE - 1,S_BuffICost,S_BuffFCost,p);
}
extern "C" void Gpu_OptimisationOneDirection(Data2Optimiz<CuDeviceData3D> &d2O)
{
ushort deltaMax = d2O.penteMax();
float zReg = (float)d2O.zReg();
float zRegQuad = d2O.zRegQuad();
ushort costDefMask = d2O.CostDefMasked();
ushort costTransDefMask = d2O.CostTransMaskNoMask();
bool hasMaskauto = d2O.hasMaskAuto();
dim3 Threads(WARPSIZE,1,1);
dim3 Blocks(d2O.NBlines(),1,1);
ushort sizeBuff = min(d2O.DzMax(),4096); //NAPPEMAX;
ushort cacheLin = sizeBuff + 2 * WARPSIZE;
// Compute the size of the dynamic shared-memory allocation
uint sizeSharedMemory =
cacheLin * sizeof(ushort) + // S_BuffICost0
cacheLin * sizeof(uint) + // S_BuffFCost0
cacheLin * sizeof(uint) + // S_BuffFCost1
WARPSIZE * sizeof(short3) + // S_BuffIndex
// WARPSIZE * sizeof(uint) + // S_BuffDefCor
sizeof(uint) + // pit_Id
sizeof(uint); // pit_Stream
Kernel_OptimisationOneDirection< uint ><<<Blocks,Threads,sizeSharedMemory>>>
(
d2O.pInitCost(),
d2O.pIndex(),
d2O.pForceCostVol(),
d2O.pDefCor(),
d2O.pParam(),
deltaMax,
zReg,
zRegQuad,
costDefMask,
costTransDefMask,
sizeBuff,
hasMaskauto
);
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
{
printf("Error CUDA Gpu_OptimisationOneDirection\n");
printf("%s",cudaGetErrorString(err));
DUMP(d2O.NBlines());
DUMP(sizeSharedMemory);
DUMP(d2O.DzMax());
}
getLastCudaError("TestkernelOptiOneDirection failed");
}
#endif //_OPTIMISATION_KERNEL_Z_H_
|
66828c017f8ee6fc0e8b92e8d2a113cdebee518a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cudaSToOutput_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const unsigned int nbProposals = 1;
const unsigned int scoreIdx = 1;
const unsigned int nbCls = 1;
const unsigned int nbOutputs = 1;
const unsigned int maxParts = 1;
const unsigned int maxTemplates = 1;
bool generateParts = 1;
bool generateTemplates = 1;
const int *numPartsPerClass = NULL;
hipMalloc(&numPartsPerClass, XSIZE*YSIZE);
const int *numTemplatesPerClass = NULL;
hipMalloc(&numTemplatesPerClass, XSIZE*YSIZE);
const int *maxCls = NULL;
hipMalloc(&maxCls, XSIZE*YSIZE);
const float *ROIEst = NULL;
hipMalloc(&ROIEst, XSIZE*YSIZE);
const int *predictionIndex = NULL;
hipMalloc(&predictionIndex, XSIZE*YSIZE);
const float *partsPrediction = NULL;
hipMalloc(&partsPrediction, XSIZE*YSIZE);
const float *partsVisibilityPrediction = NULL;
hipMalloc(&partsVisibilityPrediction, XSIZE*YSIZE);
const float *templatesPrediction = NULL;
hipMalloc(&templatesPrediction, XSIZE*YSIZE);
float *outputs = NULL;
hipMalloc(&outputs, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
cudaSToOutput_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, nbProposals,scoreIdx,nbCls,nbOutputs,maxParts,maxTemplates,generateParts,generateTemplates,numPartsPerClass,numTemplatesPerClass,maxCls,ROIEst,predictionIndex,partsPrediction,partsVisibilityPrediction,templatesPrediction,outputs);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
cudaSToOutput_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, nbProposals,scoreIdx,nbCls,nbOutputs,maxParts,maxTemplates,generateParts,generateTemplates,numPartsPerClass,numTemplatesPerClass,maxCls,ROIEst,predictionIndex,partsPrediction,partsVisibilityPrediction,templatesPrediction,outputs);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
cudaSToOutput_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, nbProposals,scoreIdx,nbCls,nbOutputs,maxParts,maxTemplates,generateParts,generateTemplates,numPartsPerClass,numTemplatesPerClass,maxCls,ROIEst,predictionIndex,partsPrediction,partsVisibilityPrediction,templatesPrediction,outputs);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 66828c017f8ee6fc0e8b92e8d2a113cdebee518a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cudaSToOutput_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const unsigned int nbProposals = 1;
const unsigned int scoreIdx = 1;
const unsigned int nbCls = 1;
const unsigned int nbOutputs = 1;
const unsigned int maxParts = 1;
const unsigned int maxTemplates = 1;
bool generateParts = 1;
bool generateTemplates = 1;
const int *numPartsPerClass = NULL;
cudaMalloc(&numPartsPerClass, XSIZE*YSIZE);
const int *numTemplatesPerClass = NULL;
cudaMalloc(&numTemplatesPerClass, XSIZE*YSIZE);
const int *maxCls = NULL;
cudaMalloc(&maxCls, XSIZE*YSIZE);
const float *ROIEst = NULL;
cudaMalloc(&ROIEst, XSIZE*YSIZE);
const int *predictionIndex = NULL;
cudaMalloc(&predictionIndex, XSIZE*YSIZE);
const float *partsPrediction = NULL;
cudaMalloc(&partsPrediction, XSIZE*YSIZE);
const float *partsVisibilityPrediction = NULL;
cudaMalloc(&partsVisibilityPrediction, XSIZE*YSIZE);
const float *templatesPrediction = NULL;
cudaMalloc(&templatesPrediction, XSIZE*YSIZE);
float *outputs = NULL;
cudaMalloc(&outputs, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cudaSToOutput_kernel<<<gridBlock,threadBlock>>>(nbProposals,scoreIdx,nbCls,nbOutputs,maxParts,maxTemplates,generateParts,generateTemplates,numPartsPerClass,numTemplatesPerClass,maxCls,ROIEst,predictionIndex,partsPrediction,partsVisibilityPrediction,templatesPrediction,outputs);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cudaSToOutput_kernel<<<gridBlock,threadBlock>>>(nbProposals,scoreIdx,nbCls,nbOutputs,maxParts,maxTemplates,generateParts,generateTemplates,numPartsPerClass,numTemplatesPerClass,maxCls,ROIEst,predictionIndex,partsPrediction,partsVisibilityPrediction,templatesPrediction,outputs);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cudaSToOutput_kernel<<<gridBlock,threadBlock>>>(nbProposals,scoreIdx,nbCls,nbOutputs,maxParts,maxTemplates,generateParts,generateTemplates,numPartsPerClass,numTemplatesPerClass,maxCls,ROIEst,predictionIndex,partsPrediction,partsVisibilityPrediction,templatesPrediction,outputs);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
48dc96cb16d97942bc280bf29f4546180d531e4e.hip | // !!! This is a file automatically generated by hipify!!!
#include "device_atomic_functions.hpp"
#include "device_functions.hpp"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "ImgProcess.h"
#include <stdio.h>
#define Pi 3.14159265359
__global__ void general2final_kernel(int iw, int ih, float *source, unsigned char *dest)
{
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
dest[iw*y + x] = (unsigned char)source[iw*y + x];
}
__global__ void treshold_kernel(int iw, int ih, int binary_treshold, unsigned char *source, unsigned char *dest)
{
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
if (y < 10)
dest[iw*y + x] = 0;
if (y > ih - 10)
dest[iw*y + x] = 0;
if (x < 10)
dest[iw*y + x] = 0;
if (x > iw - 10)
dest[iw*y + x] = 0;
if ((unsigned char)dest[iw*y + x] > 60)
dest[iw*y + x] = 255;
else
dest[iw*y + x] = 0;
__syncthreads();
}
__global__ void Profile_kernel(int iw, int ih, unsigned char *source, double *dest)
{
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
if (x >= 0 && x < iw && y >= 0 && y < ih)
{
dest[x] += source[iw*y + x];
//atomicAdd(&dest[x], source[iw*y + x]); // would avoid the race in the += above (threads sharing the same x), but double-precision atomicAdd was not available on the original target architecture (CUDA < sm_60)
}
}
__global__ void sinc_kernel(int iw, int ih, double a1, double a2, unsigned char *source, unsigned char *dest)
{
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
int Landa = 800, Betta = 100;
double brightness;
int offset_cols = iw;
int offset_rows = ih;
double P_a1 = a1*a1;
double P_a2 = a2*a2;
brightness = -1 * Landa*sin(Pi*pow(P_a1*(x - offset_cols / 2)*(x - offset_cols / 2)*1.0 + P_a2*(y - offset_rows / 2)*(y - offset_rows / 2)*1.0, 0.5)) / (Pi*pow(P_a1*(x - offset_cols / 2)*(x - offset_cols / 2)*1.0 + P_a2*(y - offset_rows / 2)*(y - offset_rows / 2)*1.0, 0.5)) + Betta; // Y must be more than X in rectangular image when cols is more than rows
if (brightness > 255)
brightness = 255;
if (brightness < 0)
brightness = 0;
if (brightness < 50)
dest[iw*y + x] = (unsigned char)brightness;
}
__global__ void generalgradient_kernel(int iw, int ih, int frameCount, unsigned char *source, float *dest)
{
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
float temp;
if (x > 0 && x < iw - 1 && y > 0 && y < ih - 1)
{
temp = dest[iw*y + x];
dest[iw*y + x] = (float)(1.0*((frameCount - 1)*temp + source[iw*y + x]) / frameCount);
}
}
__global__ void boxfilter_kernel(int iw, int ih, unsigned char *source, unsigned char *dest, int bw, int bh)
{
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
int count = 0;
float sum = 0.0;
for (int j = -(bh / 2); j <= (bh / 2); j++)
for (int i = -(bw / 2); i <= (bw / 2); i++)
{
if ((x + i) < iw && (x + i) >= 0 && (y + j) < ih && (y + j) >= 0)
{
sum += (float)source[((y + j)*iw) + (x + i)];
count++;
}
}
sum /= (float)count * 2;
dest[(y*iw) + x] = (unsigned char)sum;
}
__global__ void sobelfilter_kernel(int iw, int ih, unsigned char *source, unsigned char *dest)
{
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
if (x > 0 && x < iw - 1 && y > 0 && y < ih - 1)
{
int gx = -1 * source[iw*(y - 1) + (x - 1)] + source[iw*(y - 1) + (x + 1)] +
-2 * source[iw*y + (x - 1)] + 2 * source[iw*y + (x + 1)] +
-1 * source[iw*(y + 1) + (x - 1)] + source[iw*(y + 1) + (x + 1)];
int gy = -source[iw*(y - 1) + (x - 1)] - 2 * source[iw*(y - 1) + x]
- source[iw*(y - 1) + (x + 1)] +
source[iw*(y + 1) + (x - 1)] + 2 * source[iw*(y + 1) + x] +
source[iw*(y + 1) + (x + 1)];
dest[iw*y + x] = (unsigned char)sqrt((float)gx*(float)gx + (float)gy*float(gy));
}
}
extern "C" void boxfilter(int iw, int ih, unsigned char *source, unsigned char *dest, int bw, int bh)
{
unsigned char *dev_source, *dev_dest;
hipHostGetDevicePointer(&dev_source, source, 0);
hipHostGetDevicePointer(&dev_dest, dest, 0);
dim3 blocks(iw / 16, ih / 16);
dim3 threads(16, 16);
boxfilter_kernel << <blocks, threads >> >(iw, ih, dev_source, dev_dest, bw, bh);
hipDeviceSynchronize();
}
extern "C" void sobelfilter(int iw, int ih, unsigned char *source, unsigned char *dest)
{
// allocate memory for bitmap
unsigned char *dev_source, *dev_dest;
hipHostGetDevicePointer(&dev_source, source, 0);
hipHostGetDevicePointer(&dev_dest, dest, 0);
dim3 block(iw / 16, ih / 16);
dim3 threads(16, 16);
sobelfilter_kernel << <block, threads >> >(iw, ih, dev_source, dev_dest);
hipDeviceSynchronize();
}
extern "C" unsigned char* createImageBuffer(unsigned int bytes)
{
unsigned char *ptr = NULL;
hipSetDeviceFlags(hipDeviceMapHost);
hipHostMalloc(&ptr, bytes, hipHostMallocMapped);
return ptr;
}
extern "C" float * createImageBufferFloat(unsigned int Bytes)
{
float *ptr = NULL;
hipSetDeviceFlags(hipDeviceMapHost);
hipHostMalloc(&ptr, Bytes, hipHostMallocMapped);
return ptr;
}
void desetroyImageBuffer(unsigned char* bytes)
{
hipHostFree(bytes);
}
void sinc(int iw, int ih, double a1, double a2, unsigned char *source, unsigned char *dest)
{
unsigned char *dev_source, *dev_dest;
hipHostGetDevicePointer(&dev_source, source, 0);
hipHostGetDevicePointer(&dev_dest, dest, 0);
dim3 blocks(iw / 16, ih / 16);
dim3 threads(16, 16);
sinc_kernel << <blocks, threads >> >(iw, ih, a1, a2, dev_source, dev_dest);
hipDeviceSynchronize();
}
void generalgradient(int iw, int ih, int frameCount, unsigned char *source, float *dest)
{
unsigned char *dev_source;
float *dev_dest;
hipHostGetDevicePointer(&dev_source, source, 0);
hipHostGetDevicePointer(&dev_dest, dest, 0);
dim3 blocks(iw / 16, ih / 16);
dim3 threads(16, 16);
generalgradient_kernel << <blocks, threads >> >(iw, ih, frameCount, dev_source, dev_dest);
hipDeviceSynchronize();
}
extern "C" void treshold(int iw, int ih, int binary_treshold, unsigned char *source, unsigned char *dest)
{
unsigned char *dev_source, *dev_dest;
hipHostGetDevicePointer(&dev_source, source, 0);
hipHostGetDevicePointer(&dev_dest, dest, 0);
dim3 blocks(iw / 16, ih / 16);
dim3 threads(16, 16);
treshold_kernel << <blocks, threads >> >(iw, ih, binary_treshold, dev_source, dev_dest);
hipDeviceSynchronize();
}
extern "C" void profile(int iw, int ih, unsigned char *img, double *myarray)
{
unsigned char *dev_source;
double *dev_dest;
hipHostGetDevicePointer(&dev_source, img, 0);
hipHostGetDevicePointer(&dev_dest, myarray, 0);
dim3 blocks(iw / 16, ih / 16);
dim3 threads(16, 16);
Profile_kernel << <blocks, threads >> >(iw, ih, dev_source, dev_dest);
hipDeviceSynchronize();
}
extern "C" double * createdouble(double Bytes)
{
double *ptr = NULL;
hipSetDeviceFlags(hipDeviceMapHost);
hipHostMalloc(&ptr, Bytes, hipHostMallocMapped);
return ptr;
}
extern "C" void general2final(int iw, int ih, float *source, unsigned char *dest)
{
float *dev_source;
unsigned char *dev_dest;
hipHostGetDevicePointer(&dev_source, source, 0);
hipHostGetDevicePointer(&dev_dest, dest, 0);
dim3 blocks(iw / 16, ih / 16);
dim3 threads(16, 16);
general2final_kernel << <blocks, threads >> >(iw, ih, dev_source, dev_dest);
hipDeviceSynchronize();
}
| 48dc96cb16d97942bc280bf29f4546180d531e4e.cu | #include "device_atomic_functions.hpp"
#include "device_functions.hpp"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "ImgProcess.h"
#include <stdio.h>
#define Pi 3.14159265359
__global__ void general2final_kernel(int iw, int ih, float *source, unsigned char *dest)
{
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
dest[iw*y + x] = (unsigned char)source[iw*y + x];
}
__global__ void treshold_kernel(int iw, int ih, int binary_treshold, unsigned char *source, unsigned char *dest)
{
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
if (y < 10)
dest[iw*y + x] = 0;
if (y > ih - 10)
dest[iw*y + x] = 0;
if (x < 10)
dest[iw*y + x] = 0;
if (x > iw - 10)
dest[iw*y + x] = 0;
if ((unsigned char)dest[iw*y + x] > 60)
dest[iw*y + x] = 255;
else
dest[iw*y + x] = 0;
__syncthreads();
}
__global__ void Profile_kernel(int iw, int ih, unsigned char *source, double *dest)
{
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
if (x >= 0 && x < iw && y >= 0 && y < ih)
{
dest[x] += source[iw*y + x];
//atomicAdd(&dest[x], source[iw*y + x]); // would avoid the race in the += above (threads sharing the same x), but atomicAdd on double is only declared natively for compute capability 6.0+
}
}
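// Editor's sketch (not part of the original file): the commented-out atomicAdd
// above fails to compile because atomicAdd(double*, double) only exists
// natively on compute capability 6.0 and later. On older devices the standard
// CAS-based emulation from the CUDA C Programming Guide can be used; the
// helper name atomicAddDouble is made up for illustration and is not wired
// into Profile_kernel here.
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600
__device__ double atomicAddDouble(double* address, double val)
{
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}
#endif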
__global__ void sinc_kernel(int iw, int ih, double a1, double a2, unsigned char *source, unsigned char *dest)
{
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
int Landa = 800, Betta = 100;
double brightness;
int offset_cols = iw;
int offset_rows = ih;
double P_a1 = a1*a1;
double P_a2 = a2*a2;
brightness = -1 * Landa*sin(Pi*pow(P_a1*(x - offset_cols / 2)*(x - offset_cols / 2)*1.0 + P_a2*(y - offset_rows / 2)*(y - offset_rows / 2)*1.0, 0.5)) / (Pi*pow(P_a1*(x - offset_cols / 2)*(x - offset_cols / 2)*1.0 + P_a2*(y - offset_rows / 2)*(y - offset_rows / 2)*1.0, 0.5)) + Betta; // Y must be more than X in rectangular image when cols is more than rows
if (brightness > 255)
brightness = 255;
if (brightness < 0)
brightness = 0;
if (brightness < 50)
dest[iw*y + x] = (unsigned char)brightness;
}
__global__ void generalgradient_kernel(int iw, int ih, int frameCount, unsigned char *source, float *dest)
{
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
float temp;
if (x > 0 && x < iw - 1 && y > 0 && y < ih - 1)
{
temp = dest[iw*y + x];
dest[iw*y + x] = (float)(1.0*((frameCount - 1)*temp + source[iw*y + x]) / frameCount);
}
}
__global__ void boxfilter_kernel(int iw, int ih, unsigned char *source, unsigned char *dest, int bw, int bh)
{
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
int count = 0;
float sum = 0.0;
for (int j = -(bh / 2); j <= (bh / 2); j++)
for (int i = -(bw / 2); i <= (bw / 2); i++)
{
if ((x + i) < iw && (x + i) >= 0 && (y + j) < ih && (y + j) >= 0)
{
sum += (float)source[((y + j)*iw) + (x + i)];
count++;
}
}
sum /= (float)count * 2;
dest[(y*iw) + x] = (unsigned char)sum;
}
__global__ void sobelfilter_kernel(int iw, int ih, unsigned char *source, unsigned char *dest)
{
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
if (x > 0 && x < iw - 1 && y > 0 && y < ih - 1)
{
int gx = -1 * source[iw*(y - 1) + (x - 1)] + source[iw*(y - 1) + (x + 1)] +
-2 * source[iw*y + (x - 1)] + 2 * source[iw*y + (x + 1)] +
-1 * source[iw*(y + 1) + (x - 1)] + source[iw*(y + 1) + (x + 1)];
int gy = -source[iw*(y - 1) + (x - 1)] - 2 * source[iw*(y - 1) + x]
- source[iw*(y - 1) + (x + 1)] +
source[iw*(y + 1) + (x - 1)] + 2 * source[iw*(y + 1) + x] +
source[iw*(y + 1) + (x + 1)];
dest[iw*y + x] = (unsigned char)sqrt((float)gx*(float)gx + (float)gy*float(gy));
}
}
extern "C" void boxfilter(int iw, int ih, unsigned char *source, unsigned char *dest, int bw, int bh)
{
unsigned char *dev_source, *dev_dest;
cudaHostGetDevicePointer(&dev_source, source, 0);
cudaHostGetDevicePointer(&dev_dest, dest, 0);
dim3 blocks(iw / 16, ih / 16);
dim3 threads(16, 16);
boxfilter_kernel << <blocks, threads >> >(iw, ih, dev_source, dev_dest, bw, bh);
cudaThreadSynchronize();
}
extern "C" void sobelfilter(int iw, int ih, unsigned char *source, unsigned char *dest)
{
// allocate memory for bitmap
unsigned char *dev_source, *dev_dest;
cudaHostGetDevicePointer(&dev_source, source, 0);
cudaHostGetDevicePointer(&dev_dest, dest, 0);
dim3 block(iw / 16, ih / 16);
dim3 threads(16, 16);
sobelfilter_kernel << <block, threads >> >(iw, ih, dev_source, dev_dest);
cudaThreadSynchronize();
}
extern "C" unsigned char* createImageBuffer(unsigned int bytes)
{
unsigned char *ptr = NULL;
cudaSetDeviceFlags(cudaDeviceMapHost);
cudaHostAlloc(&ptr, bytes, cudaHostAllocMapped);
return ptr;
}
extern "C" float * createImageBufferFloat(unsigned int Bytes)
{
float *ptr = NULL;
cudaSetDeviceFlags(cudaDeviceMapHost);
cudaHostAlloc(&ptr, Bytes, cudaHostAllocMapped);
return ptr;
}
void desetroyImageBuffer(unsigned char* bytes)
{
cudaFreeHost(bytes);
}
void sinc(int iw, int ih, double a1, double a2, unsigned char *source, unsigned char *dest)
{
unsigned char *dev_source, *dev_dest;
cudaHostGetDevicePointer(&dev_source, source, 0);
cudaHostGetDevicePointer(&dev_dest, dest, 0);
dim3 blocks(iw / 16, ih / 16);
dim3 threads(16, 16);
sinc_kernel << <blocks, threads >> >(iw, ih, a1, a2, dev_source, dev_dest);
cudaThreadSynchronize();
}
void generalgradient(int iw, int ih, int frameCount, unsigned char *source, float *dest)
{
unsigned char *dev_source;
float *dev_dest;
cudaHostGetDevicePointer(&dev_source, source, 0);
cudaHostGetDevicePointer(&dev_dest, dest, 0);
dim3 blocks(iw / 16, ih / 16);
dim3 threads(16, 16);
generalgradient_kernel << <blocks, threads >> >(iw, ih, frameCount, dev_source, dev_dest);
cudaThreadSynchronize();
}
extern "C" void treshold(int iw, int ih, int binary_treshold, unsigned char *source, unsigned char *dest)
{
unsigned char *dev_source, *dev_dest;
cudaHostGetDevicePointer(&dev_source, source, 0);
cudaHostGetDevicePointer(&dev_dest, dest, 0);
dim3 blocks(iw / 16, ih / 16);
dim3 threads(16, 16);
treshold_kernel << <blocks, threads >> >(iw, ih, binary_treshold, dev_source, dev_dest);
cudaThreadSynchronize();
}
extern "C" void profile(int iw, int ih, unsigned char *img, double *myarray)
{
unsigned char *dev_source;
double *dev_dest;
cudaHostGetDevicePointer(&dev_source, img, 0);
cudaHostGetDevicePointer(&dev_dest, myarray, 0);
dim3 blocks(iw / 16, ih / 16);
dim3 threads(16, 16);
Profile_kernel << <blocks, threads >> >(iw, ih, dev_source, dev_dest);
cudaThreadSynchronize();
}
extern "C" double * createdouble(double Bytes)
{
double *ptr = NULL;
cudaSetDeviceFlags(cudaDeviceMapHost);
cudaHostAlloc(&ptr, Bytes, cudaHostAllocMapped);
return ptr;
}
extern "C" void general2final(int iw, int ih, float *source, unsigned char *dest)
{
float *dev_source;
unsigned char *dev_dest;
cudaHostGetDevicePointer(&dev_source, source, 0);
cudaHostGetDevicePointer(&dev_dest, dest, 0);
dim3 blocks(iw / 16, ih / 16);
dim3 threads(16, 16);
general2final_kernel << <blocks, threads >> >(iw, ih, dev_source, dev_dest);
cudaThreadSynchronize();
}
|
82d96785fde13da6feb2697d0181817505cc6601.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include "cuda_utils.cuh"
#include "linalg/transpose.h"
#include "random/rng.cuh"
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
template <typename T>
struct TranposeInputs {
T tolerance;
int len;
int n_row;
int n_col;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const TranposeInputs<T> &dims) {
return os;
}
template <typename T>
class TransposeTest : public ::testing::TestWithParam<TranposeInputs<T>> {
protected:
void SetUp() override {
CUBLAS_CHECK(hipblasCreate(&handle));
CUDA_CHECK(hipStreamCreate(&stream));
params = ::testing::TestWithParam<TranposeInputs<T>>::GetParam();
int len = params.len;
allocate(data, len);
ASSERT(params.len == 9, "This test works only with len=9!");
T data_h[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
updateDevice(data, data_h, len, stream);
allocate(data_trans_ref, len);
T data_ref_h[] = {1.0, 4.0, 7.0, 2.0, 5.0, 8.0, 3.0, 6.0, 9.0};
updateDevice(data_trans_ref, data_ref_h, len, stream);
allocate(data_trans, len);
transpose(data, data_trans, params.n_row, params.n_col, handle, stream);
transpose(data, params.n_row, stream);
}
void TearDown() override {
CUDA_CHECK(hipFree(data));
CUDA_CHECK(hipFree(data_trans));
CUDA_CHECK(hipFree(data_trans_ref));
CUBLAS_CHECK(hipblasDestroy(handle));
CUDA_CHECK(hipStreamDestroy(stream));
}
protected:
TranposeInputs<T> params;
T *data, *data_trans, *data_trans_ref;
hipblasHandle_t handle;
hipStream_t stream;
};
const std::vector<TranposeInputs<float>> inputsf2 = {
{0.1f, 3 * 3, 3, 3, 1234ULL}};
const std::vector<TranposeInputs<double>> inputsd2 = {
{0.1, 3 * 3, 3, 3, 1234ULL}};
typedef TransposeTest<float> TransposeTestValF;
TEST_P(TransposeTestValF, Result) {
ASSERT_TRUE(devArrMatch(data_trans_ref, data_trans, params.len,
CompareApproxAbs<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(data_trans_ref, data, params.len,
CompareApproxAbs<float>(params.tolerance)));
}
typedef TransposeTest<double> TransposeTestValD;
TEST_P(TransposeTestValD, Result) {
ASSERT_TRUE(devArrMatch(data_trans_ref, data_trans, params.len,
CompareApproxAbs<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(data_trans_ref, data, params.len,
CompareApproxAbs<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(TransposeTests, TransposeTestValF,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(TransposeTests, TransposeTestValD,
::testing::ValuesIn(inputsd2));
} // end namespace LinAlg
} // end namespace MLCommon
| 82d96785fde13da6feb2697d0181817505cc6601.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include "cuda_utils.cuh"
#include "linalg/transpose.h"
#include "random/rng.cuh"
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
template <typename T>
struct TranposeInputs {
T tolerance;
int len;
int n_row;
int n_col;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const TranposeInputs<T> &dims) {
return os;
}
template <typename T>
class TransposeTest : public ::testing::TestWithParam<TranposeInputs<T>> {
protected:
void SetUp() override {
CUBLAS_CHECK(cublasCreate(&handle));
CUDA_CHECK(cudaStreamCreate(&stream));
params = ::testing::TestWithParam<TranposeInputs<T>>::GetParam();
int len = params.len;
allocate(data, len);
ASSERT(params.len == 9, "This test works only with len=9!");
T data_h[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
updateDevice(data, data_h, len, stream);
allocate(data_trans_ref, len);
T data_ref_h[] = {1.0, 4.0, 7.0, 2.0, 5.0, 8.0, 3.0, 6.0, 9.0};
updateDevice(data_trans_ref, data_ref_h, len, stream);
allocate(data_trans, len);
transpose(data, data_trans, params.n_row, params.n_col, handle, stream);
transpose(data, params.n_row, stream);
}
void TearDown() override {
CUDA_CHECK(cudaFree(data));
CUDA_CHECK(cudaFree(data_trans));
CUDA_CHECK(cudaFree(data_trans_ref));
CUBLAS_CHECK(cublasDestroy(handle));
CUDA_CHECK(cudaStreamDestroy(stream));
}
protected:
TranposeInputs<T> params;
T *data, *data_trans, *data_trans_ref;
cublasHandle_t handle;
cudaStream_t stream;
};
const std::vector<TranposeInputs<float>> inputsf2 = {
{0.1f, 3 * 3, 3, 3, 1234ULL}};
const std::vector<TranposeInputs<double>> inputsd2 = {
{0.1, 3 * 3, 3, 3, 1234ULL}};
typedef TransposeTest<float> TransposeTestValF;
TEST_P(TransposeTestValF, Result) {
ASSERT_TRUE(devArrMatch(data_trans_ref, data_trans, params.len,
CompareApproxAbs<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(data_trans_ref, data, params.len,
CompareApproxAbs<float>(params.tolerance)));
}
typedef TransposeTest<double> TransposeTestValD;
TEST_P(TransposeTestValD, Result) {
ASSERT_TRUE(devArrMatch(data_trans_ref, data_trans, params.len,
CompareApproxAbs<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(data_trans_ref, data, params.len,
CompareApproxAbs<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(TransposeTests, TransposeTestValF,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(TransposeTests, TransposeTestValD,
::testing::ValuesIn(inputsd2));
} // end namespace LinAlg
} // end namespace MLCommon
|
96ecd07a43f77dd751258668ee8f4a67111c33ac.hip | // !!! This is a file automatically generated by hipify!!!
#include <unistd.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <iostream>
#include <ctime> // for time(), used to seed the generator below
__global__ void init(unsigned int seed, hiprandState_t* states) {
/* we have to initialize the state */
hiprand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */
blockIdx.x*blockDim.x+threadIdx.x, /* the sequence number should be different for each core (unless you want all
cores to get the same sequence of numbers for some reason - use thread id! */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[blockIdx.x*blockDim.x+threadIdx.x]);
}
__global__ void Random(hiprandState_t* states, unsigned int *c)
{
unsigned int ind = blockIdx.x*blockDim.x+threadIdx.x;
c[ind] = 1 + hiprand(&states[ind]) % 100; // each thread draws from its own state
}
int main(void)
{
int N = 1000000; // N must be known before allocating the per-thread states
hiprandState_t* states;
hipMalloc((void**) &states, N * sizeof(hiprandState_t));
hipLaunchKernelGGL(( init), dim3(N/1000), dim3(1000), 0, 0, time(0), states);
unsigned int *y, *d_y; // unsigned int to match the Random kernel's output type
y = (unsigned int*)malloc(N*sizeof(unsigned int));
hipMalloc((void**) &d_y, N*sizeof(unsigned int));
for (int i = 0; i < N; i++) {
y[i] = 0;
}
//hipMemcpy(d_y, y, N*sizeof(unsigned int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Random), dim3(N/1000), dim3(1000), 0, 0, states, d_y);
hipMemcpy(y, d_y, N*sizeof(unsigned int), hipMemcpyDeviceToHost);
for (int i = 0; i < N; i++)
{
std::cout << y[i] << std::endl;
}
/*for(int j = 0;j<1000;j++)
{ int n = 0;
for(int k = 0; k<N;k++){
if(y[k]>1000*j && y[k]<=1000*(j+1)) n++ }
cout<<n<<endl;
}*/
hipFree(d_y);
free(y);
return 0;
}
| 96ecd07a43f77dd751258668ee8f4a67111c33ac.cu | #include <unistd.h>
#include <stdio.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <iostream>
#include <ctime> // for time(), used to seed the generator below
__global__ void init(unsigned int seed, curandState_t* states) {
/* we have to initialize the state */
curand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */
blockIdx.x*blockDim.x+threadIdx.x, /* the sequence number should be different for each core (unless you want all
cores to get the same sequence of numbers for some reason - use thread id! */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[blockIdx.x*blockDim.x+threadIdx.x]);
}
__global__ void Random(curandState_t* states, unsigned int *c)
{
unsigned int ind = blockIdx.x*blockDim.x+threadIdx.x;
c[ind] = 1 + curand(&states[ind]) % 100; // each thread draws from its own state
}
int main(void)
{
int N = 1000000; // N must be known before allocating the per-thread states
curandState_t* states;
cudaMalloc((void**) &states, N * sizeof(curandState_t));
init<<<N/1000, 1000>>>(time(0), states);
unsigned int *y, *d_y; // unsigned int to match the Random kernel's output type
y = (unsigned int*)malloc(N*sizeof(unsigned int));
cudaMalloc((void**) &d_y, N*sizeof(unsigned int));
for (int i = 0; i < N; i++) {
y[i] = 0;
}
//cudaMemcpy(d_y, y, N*sizeof(unsigned int), cudaMemcpyHostToDevice);
Random<<<N/1000, 1000>>>(states, d_y);
cudaMemcpy(y, d_y, N*sizeof(unsigned int), cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++)
{
std::cout << y[i] << std::endl;
}
/*for(int j = 0;j<1000;j++)
{ int n = 0;
for(int k = 0; k<N;k++){
if(y[k]>1000*j && y[k]<=1000*(j+1)) n++ }
cout<<n<<endl;
}*/
cudaFree(d_y);
free(y);
return 0;
}
|
7b0dcf82cb56a03d3f49abd525924ebd652dd372.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
This program demonstrates the basics of working with cuda. We use
the GPU to add two arrays. We also introduce cuda's approach to
error handling and timing using cuda Events.
This is the main program. You should also look at the header add.h
for the important declarations, and then look at add.cu to see how
to define functions that execute on the GPU.
*/
#include <iostream>
#include "add.h"
int main() {
int matSize = 1000;
int sequential = 1;
int blocks = 1;
int threads = 1;
//get array dimensions
std::cout << "Please enter the dimensions of the matrix (1000<=matSize<=10000):";
std::cin >> matSize;
//std::cout << "you input: " << matSize << std::endl;
//get if we are using cuda or sequential addition
std::cout << "Sequential or CUDA?(1=Sequential, 0=CUDA):";
std::cin >> sequential;
if(sequential < 1){
std::cout << "Please enter the number of blocks to be used:";
std::cin >> blocks;
if(blocks < 1){
std::cout << "invalid block number, using default of matSize*matSize." << std::endl;
blocks = matSize*matSize;
}
std::cout << "Please enter the number of threads per block(1= no striding):";
std::cin >> threads;
if(threads < 1){
std::cout << "invalid thread number, using default of 1." << std::endl;
threads = 1;
}
/*if(blocks*threads != matSize*matSize){
std::cout << "insufficient blocks and threads used, switching to default." << std::endl;
blocks = matSize*matSize;
threads = 1;
}*/
}
// Arrays on the host (CPU)
//int a[N], b[N], c[N];
int* a[matSize];
int* b[matSize];
int* c[matSize];
for(int iter = 0; iter<matSize;iter++){
a[iter] = new int [matSize];
b[iter] = new int [matSize];
c[iter] = new int [matSize];
for(int cur = 0; cur<matSize;cur++){
a[iter][cur] = iter*cur;
b[iter][cur] = iter*cur;
c[iter][cur] = 0;
}
}
/*
These will point to memory on the GPU - notice the correspondence
between these pointers and the arrays declared above.
*/
int *dev_a, *dev_b, *dev_c;
/*
These calls allocate memory on the GPU (also called the
device). This is similar to C's malloc, except that instead of
directly returning a pointer to the allocated memory, hipMalloc
returns the pointer through its first argument, which must be a
void**. The second argument is the number of bytes we want to
allocate.
NB: the return value of hipMalloc (like most cuda functions) is
an error code. Strictly speaking, we should check this value and
perform error handling if anything went wrong. We do this for the
first call to hipMalloc so you can see what it looks like, but
for all other function calls we just point out that you should do
error checking.
Actually, a good idea would be to wrap this error checking in a
function or macro, which is what the Cuda By Example book does.
*/
hipError_t err = hipMalloc( (void**) &dev_a, matSize * sizeof(int));
if (err != hipSuccess) {
std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
exit(1);
}
hipMalloc( (void**) &dev_b, matSize * sizeof(int));
hipMalloc( (void**) &dev_c, matSize * sizeof(int));
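/*
  Editor's sketch (not part of the original program): the "function or macro"
  alluded to in the comment above could look like the HANDLE_GPU_ERROR macro
  below. The macro name is made up for illustration; with it, the unchecked
  allocations above would become, e.g.,
      HANDLE_GPU_ERROR(hipMalloc((void**) &dev_b, matSize * sizeof(int)));
*/
#define HANDLE_GPU_ERROR(call)                                           \
  do {                                                                   \
    hipError_t e_ = (call);                                              \
    if (e_ != hipSuccess) {                                              \
      std::cerr << "GPU error: " << hipGetErrorString(e_) << " at "      \
                << __FILE__ << ":" << __LINE__ << std::endl;             \
      exit(1);                                                           \
    }                                                                    \
  } while (0)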
// These lines just fill the host arrays with some data so we can do
// something interesting. Well, so we can add two arrays.
/*for (int i = 0; i < N; ++i) {
a[i] = i;
b[i] = i;
}*/
/*
The following code is responsible for handling timing for code
that executes on the GPU. The cuda approach to this problem uses
events. For timing purposes, an event is essentially a point in
time. We create events for the beginning and end points of the
process we want to time. When we want to start timing, we call
hipEventRecord.
In this case, we want to record the time it takes to transfer data
to the GPU, perform some computations, and transfer data back.
*/
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord( start, 0 );
//sequential addition
if(sequential > 0){
for(int x = 0; x < matSize; x++){
for(int y = 0; y < matSize; y++){
c[x][y] = a[x][y] + b[x][y];
}
}
}else{
/*
Once we have host arrays containing data and we have allocated
memory on the GPU, we have to transfer data from the host to the
device. Again, notice the similarity to C's memcpy function.
The first argument is the destination of the copy - in this case a
pointer to memory allocated on the device. The second argument is
the source of the copy. The third argument is the number of bytes
we want to copy. The last argument is a constant that tells
hipMemcpy the direction of the transfer.
*/
for(int iter = 0; iter < matSize; iter++){
hipMemcpy(dev_a, a[iter], matSize * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b[iter], matSize * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_c, c[iter], matSize * sizeof(int), hipMemcpyHostToDevice);
/*
FINALLY we get to run some code on the GPU. At this point, if you
haven't looked at add.cu (in this folder), you should. The
comments in that file explain what the add function does, so here
let's focus on how add is being called. The first thing to notice
is the <<<...>>>, which you should recognize as _not_ being
standard C. This syntactic extension tells nvidia's cuda compiler
how to parallelize the execution of the function. We'll get into
details as the course progresses, but for now we'll say that <<<N,
1>>> is creating N _blocks_ of 1 _thread_ each. Each of these
threads is executing add with a different data element (details of
the indexing are in add.cu).
In larger programs, you will typically have many more blocks, and
each block will have many threads. Each thread will handle a
different piece of data, and many threads can execute at the same
time. This is how cuda can get such large speedups.
*/
hipLaunchKernelGGL(( add), dim3(blocks), dim3(threads), 0, 0, dev_a, dev_b, dev_c);
/*
Unfortunately, the GPU is to some extent a black box. In order to
print the results of our call to add, we have to transfer the data
back to the host. We do that with a call to hipMemcpy, which is
just like the hipMemcpy calls above, except that the direction of
the transfer (given by the last argument) is reversed. In a real
program we would want to check the error code returned by this
function.
*/
hipMemcpy(c[iter], dev_c, matSize * sizeof(int), hipMemcpyDeviceToHost);
}
}
/*
This is the other end of the timing process. We record an event,
synchronize on it, and then figure out the difference in time
between the start and the stop.
We have to call hipEventSynchronize before we can safely _read_
the value of the stop event. This is because the GPU may not have
actually written to the event until all other work has finished.
*/
hipEventRecord( end, 0 );
hipEventSynchronize( end );
float elapsedTime;
hipEventElapsedTime( &elapsedTime, start, end );
/*
Let's check that the results are what we expect.
*/
for (int i = 0; i < matSize; ++i) {
for(int j = 0; j < matSize; j++){
if (c[i][j] != a[i][j] + b[i][j]) {
std::cerr << "Oh no! Something went wrong. You should check your cuda install and your GPU. :(" << std::endl;
std::cout << "Your program took: " << elapsedTime << " ms." << std::endl;
// clean up events - we should check for error codes here.
hipEventDestroy( start );
hipEventDestroy( end );
// clean up device pointers - just like free in C. We don't have
// to check error codes for this one.
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
exit(1);
}
}
}
/*
Let's let the user know that everything is ok and then display
some information about the times we recorded above.
*/
std::cout << "Yay! Your program's results are correct." << std::endl;
std::cout << "Your program took: " << elapsedTime << " ms." << std::endl;
// Cleanup in the event of success.
hipEventDestroy( start );
hipEventDestroy( end );
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
}
| 7b0dcf82cb56a03d3f49abd525924ebd652dd372.cu | /*
This program demonstrates the basics of working with cuda. We use
the GPU to add two arrays. We also introduce cuda's approach to
error handling and timing using cuda Events.
This is the main program. You should also look at the header add.h
for the important declarations, and then look at add.cu to see how
to define functions that execute on the GPU.
*/
#include <iostream>
#include "add.h"
int main() {
int matSize = 1000;
int sequential = 1;
int blocks = 1;
int threads = 1;
//get array dimensions
std::cout << "Please enter the dimensions of the matrix (1000<=matSize<=10000):";
std::cin >> matSize;
//std::cout << "you input: " << matSize << std::endl;
//get if we are using cuda or sequential addtion
std::cout << "Sequential or CUDA?(1=Sequential, 0=CUDA):";
std::cin >> sequential;
if(sequential < 1){
std::cout << "Please enter the number of blocks to be used:";
std::cin >> blocks;
if(blocks < 1){
std::cout << "invalid block number, using default of matSize*matSize." << std::endl;
blocks = matSize*matSize;
}
std::cout << "Please enter the number of threads per block(1= no striding):";
std::cin >> threads;
if(threads < 1){
std::cout << "invalid thread number, using default of 1." << std::endl;
threads = 1;
}
/*if(blocks*threads != matSize*matSize){
std::cout << "insufficient blocks and threads used, switching to default." << std::endl;
blocks = matSize*matSize;
threads = 1;
}*/
}
// Arrays on the host (CPU)
//int a[N], b[N], c[N];
int* a[matSize];
int* b[matSize];
int* c[matSize];
for(int iter = 0; iter<matSize;iter++){
a[iter] = new int [matSize];
b[iter] = new int [matSize];
c[iter] = new int [matSize];
for(int cur = 0; cur<matSize;cur++){
a[iter][cur] = iter*cur;
b[iter][cur] = iter*cur;
c[iter][cur] = 0;
}
}
/*
These will point to memory on the GPU - notice the correspondence
between these pointers and the arrays declared above.
*/
int *dev_a, *dev_b, *dev_c;
/*
These calls allocate memory on the GPU (also called the
device). This is similar to C's malloc, except that instead of
directly returning a pointer to the allocated memory, cudaMalloc
returns the pointer through its first argument, which must be a
void**. The second argument is the number of bytes we want to
allocate.
NB: the return value of cudaMalloc (like most cuda functions) is
an error code. Strictly speaking, we should check this value and
perform error handling if anything went wrong. We do this for the
first call to cudaMalloc so you can see what it looks like, but
for all other function calls we just point out that you should do
error checking.
Actually, a good idea would be to wrap this error checking in a
function or macro, which is what the Cuda By Example book does.
*/
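/*
As a rough sketch (not used in this program), such a checking macro might look like:
#define CUDA_CHECK(call) do { \
cudaError_t e = (call); \
if (e != cudaSuccess) { \
std::cerr << "CUDA error: " << cudaGetErrorString(e) << std::endl; \
exit(1); \
} \
} while (0)
*/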
cudaError_t err = cudaMalloc( (void**) &dev_a, matSize * sizeof(int));
if (err != cudaSuccess) {
std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
exit(1);
}
cudaMalloc( (void**) &dev_b, matSize * sizeof(int));
cudaMalloc( (void**) &dev_c, matSize * sizeof(int));
// These lines just fill the host arrays with some data so we can do
// something interesting. Well, so we can add two arrays.
/*for (int i = 0; i < N; ++i) {
a[i] = i;
b[i] = i;
}*/
/*
The following code is responsible for handling timing for code
that executes on the GPU. The cuda approach to this problem uses
events. For timing purposes, an event is essentially a point in
time. We create events for the beginning and end points of the
process we want to time. When we want to start timing, we call
cudaEventRecord.
In this case, we want to record the time it takes to transfer data
to the GPU, perform some computations, and transfer data back.
*/
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord( start, 0 );
//sequential addition
if(sequential > 0){
for(int x = 0; x < matSize; x++){
for(int y = 0; y < matSize; y++){
c[x][y] = a[x][y] + b[x][y];
}
}
}else{
/*
Once we have host arrays containing data and we have allocated
memory on the GPU, we have to transfer data from the host to the
device. Again, notice the similarity to C's memcpy function.
The first argument is the destination of the copy - in this case a
pointer to memory allocated on the device. The second argument is
the source of the copy. The third argument is the number of bytes
we want to copy. The last argument is a constant that tells
cudaMemcpy the direction of the transfer.
*/
for(int iter = 0; iter < matSize; iter++){
cudaMemcpy(dev_a, a[iter], matSize * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b[iter], matSize * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_c, c[iter], matSize * sizeof(int), cudaMemcpyHostToDevice);
/*
FINALLY we get to run some code on the GPU. At this point, if you
haven't looked at add.cu (in this folder), you should. The
comments in that file explain what the add function does, so here
let's focus on how add is being called. The first thing to notice
is the <<<...>>>, which you should recognize as _not_ being
standard C. This syntactic extension tells nvidia's cuda compiler
how to parallelize the execution of the function. We'll get into
details as the course progresses, but for now we'll say that <<<N,
1>>> is creating N _blocks_ of 1 _thread_ each. Each of these
threads is executing add with a different data element (details of
the indexing are in add.cu).
In larger programs, you will typically have many more blocks, and
each block will have many threads. Each thread will handle a
different piece of data, and many threads can execute at the same
time. This is how cuda can get such large speedups.
*/
add<<<blocks, threads>>>(dev_a, dev_b, dev_c);
/*
Unfortunately, the GPU is to some extent a black box. In order to
print the results of our call to add, we have to transfer the data
back to the host. We do that with a call to cudaMemcpy, which is
just like the cudaMemcpy calls above, except that the direction of
the transfer (given by the last argument) is reversed. In a real
program we would want to check the error code returned by this
function.
*/
cudaMemcpy(c[iter], dev_c, matSize * sizeof(int), cudaMemcpyDeviceToHost);
}
}
/*
This is the other end of the timing process. We record an event,
synchronize on it, and then figure out the difference in time
between the start and the stop.
We have to call cudaEventSynchronize before we can safely _read_
the value of the stop event. This is because the GPU may not have
actually written to the event until all other work has finished.
*/
cudaEventRecord( end, 0 );
cudaEventSynchronize( end );
float elapsedTime;
cudaEventElapsedTime( &elapsedTime, start, end );
/*
Let's check that the results are what we expect.
*/
for (int i = 0; i < matSize; ++i) {
for(int j = 0; j < matSize; j++){
if (c[i][j] != a[i][j] + b[i][j]) {
std::cerr << "Oh no! Something went wrong. You should check your cuda install and your GPU. :(" << std::endl;
std::cout << "Your program took: " << elapsedTime << " ms." << std::endl;
// clean up events - we should check for error codes here.
cudaEventDestroy( start );
cudaEventDestroy( end );
// clean up device pointers - just like free in C. We don't have
// to check error codes for this one.
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
exit(1);
}
}
}
/*
Let's let the user know that everything is ok and then display
some information about the times we recorded above.
*/
std::cout << "Yay! Your program's results are correct." << std::endl;
std::cout << "Your program took: " << elapsedTime << " ms." << std::endl;
// Cleanup in the event of success.
cudaEventDestroy( start );
cudaEventDestroy( end );
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
}
|
c75fecbbf1501e7d93f341f47ce6b721a125e6e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/focal_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void LogOpGPU(const int nthreads,
const Dtype* in, Dtype* out, const Dtype eps)
{
CUDA_KERNEL_LOOP(index, nthreads) {
out[index] = log(max(in[index], eps));
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::compute_intermediate_values_of_gpu() {
// compute the corresponding variables
const int count = prob_.count();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* ones_data = ones_.gpu_data();
Dtype* log_prob_data = log_prob_.mutable_gpu_data();
Dtype* power_prob_data = power_prob_.mutable_gpu_data();
/// log(p_t)
const int nthreads = prob_.count();
const Dtype eps = Dtype(FLT_MIN); // where FLT_MIN = 1.17549e-38; you can change it here
// more stable
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( LogOpGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, log_prob_data, eps);
/// caffe_gpu_log(count, prob_data, log_prob_data);
/// (1 - p_t) ^ gamma
caffe_gpu_sub(count, ones_data, prob_data, power_prob_data);
caffe_gpu_powx(count, power_prob_.gpu_data(), gamma_, power_prob_data);
caffe_gpu_scal(count, alpha_, power_prob_data);
}
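// FocalLossForwardGPU below consumes the buffers filled above: log_prob_ holds log(p)
// and power_prob_ holds alpha * (1 - p)^gamma, so each sample's focal loss is simply
// -power_prob * log_prob taken at its ground-truth channel.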
template <typename Dtype>
__global__ void FocalLossForwardGPU(const int nthreads,
const Dtype* log_prob_data,
const Dtype* power_prob_data,
const Dtype* label,
Dtype* loss,
const int num,
const int dim,
const int spatial_dim,
const bool has_ignore_label_,
const int ignore_label_,
Dtype* counts)
{
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
int ind = n * dim + label_value * spatial_dim + s;
// loss[index] = -max(power_prob_data[ind] * log_prob_data[ind], Dtype(log(Dtype(FLT_MIN))));
loss[index] = -power_prob_data[ind] * log_prob_data[ind];
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
// The forward pass computes the softmax prob values.
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
// compute all needed values
compute_intermediate_values_of_gpu();
// const Dtype* prob_data = prob_.gpu_data();
const Dtype* log_prob_data = log_prob_.gpu_data();
const Dtype* power_prob_data = power_prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( FocalLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, log_prob_data, power_prob_data,
label, loss_data,outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void FocalLossBackwardGPU(const int nthreads,
const Dtype* top,
const Dtype* label,
const Dtype* prob_data,
const Dtype* log_prob_data,
const Dtype* power_prob_data,
Dtype* bottom_diff,
const int num,
const int dim,
const int spatial_dim,
const Dtype gamma,
const bool has_ignore_label_,
const int ignore_label_,
const Dtype eps,
Dtype* counts)
{
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
// the gradient from FL w.r.t p_t, here ignore the `sign`
int ind_i = n * dim + label_value * spatial_dim + s; // index of ground-truth label
Dtype grad = 0 - gamma * (power_prob_data[ind_i] / max(1 - prob_data[ind_i], eps))
* log_prob_data[ind_i] * prob_data[ind_i]
+ power_prob_data[ind_i];
// the gradient w.r.t input data x
for (int c = 0; c < channels; ++c) {
int ind_j = n * dim + c * spatial_dim + s;
if(c == label_value) {
// if i == j (here i and j refer to the indices in the softmax derivative)
bottom_diff[ind_j] = grad * (prob_data[ind_i] - 1);
} else {
// if i != j (here i and j refer to the indices in the softmax derivative)
bottom_diff[ind_j] = grad * prob_data[ind_j];
}
}
// count
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
const Dtype eps = 1e-10;
// intermediate values computed in the forward pass
const Dtype* log_prob_data = log_prob_.gpu_data();
const Dtype* power_prob_data = power_prob_.gpu_data();
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( FocalLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, prob_data, log_prob_data, power_prob_data,
bottom_diff, outer_num_, dim, inner_num_, gamma_, has_ignore_label_, ignore_label_, eps, counts);
// Only launch another CUDA kernel if we actually need the count of valid outputs.
Dtype valid_count = -1;
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
// Scale gradient
const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(FocalLossLayer);
} // namespace caffe | c75fecbbf1501e7d93f341f47ce6b721a125e6e1.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/focal_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void LogOpGPU(const int nthreads,
const Dtype* in, Dtype* out, const Dtype eps)
{
CUDA_KERNEL_LOOP(index, nthreads) {
out[index] = log(max(in[index], eps));
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::compute_intermediate_values_of_gpu() {
// compute the corresponding variables
const int count = prob_.count();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* ones_data = ones_.gpu_data();
Dtype* log_prob_data = log_prob_.mutable_gpu_data();
Dtype* power_prob_data = power_prob_.mutable_gpu_data();
/// log(p_t)
const int nthreads = prob_.count();
const Dtype eps = Dtype(FLT_MIN); // where FLT_MIN = 1.17549e-38; you can change it here
// more stable
// NOLINT_NEXT_LINE(whitespace/operators)
LogOpGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, log_prob_data, eps);
/// caffe_gpu_log(count, prob_data, log_prob_data);
/// (1 - p_t) ^ gamma
caffe_gpu_sub(count, ones_data, prob_data, power_prob_data);
caffe_gpu_powx(count, power_prob_.gpu_data(), gamma_, power_prob_data);
caffe_gpu_scal(count, alpha_, power_prob_data);
}
template <typename Dtype>
__global__ void FocalLossForwardGPU(const int nthreads,
const Dtype* log_prob_data,
const Dtype* power_prob_data,
const Dtype* label,
Dtype* loss,
const int num,
const int dim,
const int spatial_dim,
const bool has_ignore_label_,
const int ignore_label_,
Dtype* counts)
{
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
int ind = n * dim + label_value * spatial_dim + s;
// loss[index] = -max(power_prob_data[ind] * log_prob_data[ind], Dtype(log(Dtype(FLT_MIN))));
loss[index] = -power_prob_data[ind] * log_prob_data[ind];
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
// The forward pass computes the softmax prob values.
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
// compute all needed values
compute_intermediate_values_of_gpu();
// const Dtype* prob_data = prob_.gpu_data();
const Dtype* log_prob_data = log_prob_.gpu_data();
const Dtype* power_prob_data = power_prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
FocalLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, log_prob_data, power_prob_data,
label, loss_data,outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void FocalLossBackwardGPU(const int nthreads,
const Dtype* top,
const Dtype* label,
const Dtype* prob_data,
const Dtype* log_prob_data,
const Dtype* power_prob_data,
Dtype* bottom_diff,
const int num,
const int dim,
const int spatial_dim,
const Dtype gamma,
const bool has_ignore_label_,
const int ignore_label_,
const Dtype eps,
Dtype* counts)
{
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
// the gradient from FL w.r.t p_t, here ignore the `sign`
int ind_i = n * dim + label_value * spatial_dim + s; // index of ground-truth label
Dtype grad = 0 - gamma * (power_prob_data[ind_i] / max(1 - prob_data[ind_i], eps))
* log_prob_data[ind_i] * prob_data[ind_i]
+ power_prob_data[ind_i];
// the gradient w.r.t input data x
for (int c = 0; c < channels; ++c) {
int ind_j = n * dim + c * spatial_dim + s;
if(c == label_value) {
// if i == j (here i and j refer to the indices in the softmax derivative)
bottom_diff[ind_j] = grad * (prob_data[ind_i] - 1);
} else {
// if i != j (here i and j refer to the indices in the softmax derivative)
bottom_diff[ind_j] = grad * prob_data[ind_j];
}
}
// count
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
const Dtype eps = 1e-10;
// intermediate values computed in the forward pass
const Dtype* log_prob_data = log_prob_.gpu_data();
const Dtype* power_prob_data = power_prob_.gpu_data();
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
FocalLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, prob_data, log_prob_data, power_prob_data,
bottom_diff, outer_num_, dim, inner_num_, gamma_, has_ignore_label_, ignore_label_, eps, counts);
// Only launch another CUDA kernel if we actually need the count of valid outputs.
Dtype valid_count = -1;
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
// Scale gradient
const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(FocalLossLayer);
} // namespace caffe |
c5cd19fa02839e7267951bdf7fdb010e93de1287.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//===--- omptarget-nvptx.cu - NVPTX OpenMP GPU initialization ---- CUDA -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the initialization code for the GPU
//
//===----------------------------------------------------------------------===//
#include "omptarget-nvptx.h"
#include "state-queue.h"
////////////////////////////////////////////////////////////////////////////////
// global data tables
////////////////////////////////////////////////////////////////////////////////
extern __device__
omptarget_nvptx_Queue<omptarget_nvptx_ThreadPrivateContext, OMP_STATE_COUNT>
omptarget_nvptx_device_State[MAX_SM];
extern __device__ __shared__ omptarget_nvptx_ThreadPrivateContext
*omptarget_nvptx_threadPrivateContext;
//
// The team master sets the outlined function and its arguments in these
// variables to communicate with the workers. Since they are in shared memory,
// there is one copy of these variables for each kernel, instance, and team.
//
extern __device__ __shared__ omptarget_nvptx_WorkFn omptarget_nvptx_workFn;
extern __device__ __shared__ int8_t execution_mode;
////////////////////////////////////////////////////////////////////////////////
// init entry points
////////////////////////////////////////////////////////////////////////////////
INLINE unsigned smid() {
unsigned id;
asm("mov.u32 %0, %%smid;" : "=r"(id));
return id;
}
INLINE unsigned n_sm() {
unsigned n_sm;
asm("mov.u32 %0, %%nsmid;" : "=r"(n_sm));
return n_sm;
}
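// smid() is used below to pick a per-SM slot (smid() % MAX_SM) in the global queue of
// thread-private state objects, so teams running on different SMs tend to draw their
// state from different queues.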
EXTERN void __kmpc_kernel_init(int ThreadLimit) {
PRINT(LD_IO, "call to __kmpc_kernel_init with version %f\n",
OMPTARGET_NVPTX_VERSION);
int threadIdInBlock = GetThreadIdInBlock();
ASSERT0(LT_FUSSY, threadIdInBlock == GetMasterThreadID(),
"__kmpc_kernel_init() must be called by team master warp only!");
PRINT0(LD_IO, "call to __kmpc_kernel_init for master\n");
// Get a state object from the queue.
int slot = smid() % MAX_SM;
omptarget_nvptx_threadPrivateContext = omptarget_nvptx_device_State[slot].Dequeue();
setGenericMode();
// init thread private
int threadId = GetLogicalThreadIdInBlock();
omptarget_nvptx_threadPrivateContext->InitThreadPrivateContext(
threadId);
// init team context
omptarget_nvptx_TeamDescr &currTeamDescr = getMyTeamDescriptor();
currTeamDescr.InitTeamDescr();
// this thread will start execution... has to update its task ICV
// to point to the level zero task ICV. That ICV was init in
// InitTeamDescr()
omptarget_nvptx_threadPrivateContext->SetTopLevelTaskDescr(
threadId, currTeamDescr.LevelZeroTaskDescr());
// set number of threads and thread limit in team to started value
omptarget_nvptx_TaskDescr *currTaskDescr =
omptarget_nvptx_threadPrivateContext->GetTopLevelTaskDescr(
threadId);
currTaskDescr->NThreads() = GetNumberOfWorkersInTeam();
currTaskDescr->ThreadLimit() = ThreadLimit;
}
EXTERN void __kmpc_kernel_deinit() {
// Enqueue omp state object for use by another team.
int slot = smid() % MAX_SM;
omptarget_nvptx_device_State[slot].Enqueue(omptarget_nvptx_threadPrivateContext);
// Done with work. Kill the workers.
omptarget_nvptx_workFn = 0;
}
EXTERN void __kmpc_spmd_kernel_init(int ThreadLimit,
short RequiresOMPRuntime,
short RequiresDataSharing) {
PRINT0(LD_IO, "call to __kmpc_spmd_kernel_init\n");
if (!RequiresOMPRuntime) {
// If OMP runtime is not required don't initialize OMP state.
setNoOMPMode();
return;
}
setSPMDMode();
//
// Team Context Initialization.
//
// In SPMD mode there is no master thread so use any cuda thread for team
// context initialization.
int threadId = GetThreadIdInBlock();
if (threadId == 0) {
// Get a state object from the queue.
int slot = smid() % MAX_SM;
omptarget_nvptx_threadPrivateContext = omptarget_nvptx_device_State[slot].Dequeue();
omptarget_nvptx_TeamDescr &currTeamDescr = getMyTeamDescriptor();
omptarget_nvptx_WorkDescr &workDescr = getMyWorkDescriptor();
// init team context
currTeamDescr.InitTeamDescr();
// init counters (copy start to init)
workDescr.CounterGroup().Reset();
}
__syncthreads();
omptarget_nvptx_TeamDescr &currTeamDescr = getMyTeamDescriptor();
omptarget_nvptx_WorkDescr &workDescr = getMyWorkDescriptor();
//
// Initialize task descr for each thread.
//
omptarget_nvptx_TaskDescr *newTaskDescr =
omptarget_nvptx_threadPrivateContext->Level1TaskDescr(threadId);
ASSERT0(LT_FUSSY, newTaskDescr, "expected a task descr");
newTaskDescr->InitLevelOneTaskDescr(
ThreadLimit, currTeamDescr.LevelZeroTaskDescr());
// install new top descriptor
omptarget_nvptx_threadPrivateContext->SetTopLevelTaskDescr(threadId,
newTaskDescr);
// init thread private from init value
workDescr.CounterGroup().Init(
omptarget_nvptx_threadPrivateContext->Priv(threadId));
PRINT(LD_PAR, "thread will execute parallel region with id %d in a team of "
"%d threads\n",
newTaskDescr->ThreadId(), newTaskDescr->NThreads());
if (RequiresDataSharing && threadId % WARPSIZE == 0) {
// Warp master initializes the data sharing environment.
unsigned WID = threadId >> DS_Max_Worker_Warp_Size_Log2;
__kmpc_data_sharing_slot *RootS = currTeamDescr.RootS(WID);
DataSharingState.SlotPtr[WID] = RootS;
DataSharingState.StackPtr[WID] = (void*)&RootS->Data[0];
}
}
EXTERN void __kmpc_spmd_kernel_deinit() {
// We're not going to pop the task descr stack of each thread since
// there are no more parallel regions in SPMD mode.
__syncthreads();
int threadId = GetThreadIdInBlock();
if (threadId == 0) {
// Enqueue omp state object for use by another team.
int slot = smid() % MAX_SM;
omptarget_nvptx_device_State[slot].Enqueue(omptarget_nvptx_threadPrivateContext);
}
}
| c5cd19fa02839e7267951bdf7fdb010e93de1287.cu | //===--- omptarget-nvptx.cu - NVPTX OpenMP GPU initialization ---- CUDA -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the initialization code for the GPU
//
//===----------------------------------------------------------------------===//
#include "omptarget-nvptx.h"
#include "state-queue.h"
////////////////////////////////////////////////////////////////////////////////
// global data tables
////////////////////////////////////////////////////////////////////////////////
extern __device__
omptarget_nvptx_Queue<omptarget_nvptx_ThreadPrivateContext, OMP_STATE_COUNT>
omptarget_nvptx_device_State[MAX_SM];
extern __device__ __shared__ omptarget_nvptx_ThreadPrivateContext
*omptarget_nvptx_threadPrivateContext;
//
// The team master sets the outlined function and its arguments in these
// variables to communicate with the workers. Since they are in shared memory,
// there is one copy of these variables for each kernel, instance, and team.
//
extern __device__ __shared__ omptarget_nvptx_WorkFn omptarget_nvptx_workFn;
extern __device__ __shared__ int8_t execution_mode;
////////////////////////////////////////////////////////////////////////////////
// init entry points
////////////////////////////////////////////////////////////////////////////////
INLINE unsigned smid() {
unsigned id;
asm("mov.u32 %0, %%smid;" : "=r"(id));
return id;
}
INLINE unsigned n_sm() {
unsigned n_sm;
asm("mov.u32 %0, %%nsmid;" : "=r"(n_sm));
return n_sm;
}
EXTERN void __kmpc_kernel_init(int ThreadLimit) {
PRINT(LD_IO, "call to __kmpc_kernel_init with version %f\n",
OMPTARGET_NVPTX_VERSION);
int threadIdInBlock = GetThreadIdInBlock();
ASSERT0(LT_FUSSY, threadIdInBlock == GetMasterThreadID(),
"__kmpc_kernel_init() must be called by team master warp only!");
PRINT0(LD_IO, "call to __kmpc_kernel_init for master\n");
// Get a state object from the queue.
int slot = smid() % MAX_SM;
omptarget_nvptx_threadPrivateContext = omptarget_nvptx_device_State[slot].Dequeue();
setGenericMode();
// init thread private
int threadId = GetLogicalThreadIdInBlock();
omptarget_nvptx_threadPrivateContext->InitThreadPrivateContext(
threadId);
// init team context
omptarget_nvptx_TeamDescr &currTeamDescr = getMyTeamDescriptor();
currTeamDescr.InitTeamDescr();
// this thread will start execution... has to update its task ICV
// to point to the level zero task ICV. That ICV was init in
// InitTeamDescr()
omptarget_nvptx_threadPrivateContext->SetTopLevelTaskDescr(
threadId, currTeamDescr.LevelZeroTaskDescr());
// set number of threads and thread limit in team to started value
omptarget_nvptx_TaskDescr *currTaskDescr =
omptarget_nvptx_threadPrivateContext->GetTopLevelTaskDescr(
threadId);
currTaskDescr->NThreads() = GetNumberOfWorkersInTeam();
currTaskDescr->ThreadLimit() = ThreadLimit;
}
EXTERN void __kmpc_kernel_deinit() {
// Enqueue omp state object for use by another team.
int slot = smid() % MAX_SM;
omptarget_nvptx_device_State[slot].Enqueue(omptarget_nvptx_threadPrivateContext);
// Done with work. Kill the workers.
omptarget_nvptx_workFn = 0;
}
EXTERN void __kmpc_spmd_kernel_init(int ThreadLimit,
short RequiresOMPRuntime,
short RequiresDataSharing) {
PRINT0(LD_IO, "call to __kmpc_spmd_kernel_init\n");
if (!RequiresOMPRuntime) {
// If OMP runtime is not required don't initialize OMP state.
setNoOMPMode();
return;
}
setSPMDMode();
//
// Team Context Initialization.
//
// In SPMD mode there is no master thread so use any cuda thread for team
// context initialization.
int threadId = GetThreadIdInBlock();
if (threadId == 0) {
// Get a state object from the queue.
int slot = smid() % MAX_SM;
omptarget_nvptx_threadPrivateContext = omptarget_nvptx_device_State[slot].Dequeue();
omptarget_nvptx_TeamDescr &currTeamDescr = getMyTeamDescriptor();
omptarget_nvptx_WorkDescr &workDescr = getMyWorkDescriptor();
// init team context
currTeamDescr.InitTeamDescr();
// init counters (copy start to init)
workDescr.CounterGroup().Reset();
}
__syncthreads();
omptarget_nvptx_TeamDescr &currTeamDescr = getMyTeamDescriptor();
omptarget_nvptx_WorkDescr &workDescr = getMyWorkDescriptor();
//
// Initialize task descr for each thread.
//
omptarget_nvptx_TaskDescr *newTaskDescr =
omptarget_nvptx_threadPrivateContext->Level1TaskDescr(threadId);
ASSERT0(LT_FUSSY, newTaskDescr, "expected a task descr");
newTaskDescr->InitLevelOneTaskDescr(
ThreadLimit, currTeamDescr.LevelZeroTaskDescr());
// install new top descriptor
omptarget_nvptx_threadPrivateContext->SetTopLevelTaskDescr(threadId,
newTaskDescr);
// init thread private from init value
workDescr.CounterGroup().Init(
omptarget_nvptx_threadPrivateContext->Priv(threadId));
PRINT(LD_PAR, "thread will execute parallel region with id %d in a team of "
"%d threads\n",
newTaskDescr->ThreadId(), newTaskDescr->NThreads());
if (RequiresDataSharing && threadId % WARPSIZE == 0) {
// Warp master initializes the data sharing environment.
unsigned WID = threadId >> DS_Max_Worker_Warp_Size_Log2;
__kmpc_data_sharing_slot *RootS = currTeamDescr.RootS(WID);
DataSharingState.SlotPtr[WID] = RootS;
DataSharingState.StackPtr[WID] = (void*)&RootS->Data[0];
}
}
EXTERN void __kmpc_spmd_kernel_deinit() {
// We're not going to pop the task descr stack of each thread since
// there are no more parallel regions in SPMD mode.
__syncthreads();
int threadId = GetThreadIdInBlock();
if (threadId == 0) {
// Enqueue omp state object for use by another team.
int slot = smid() % MAX_SM;
omptarget_nvptx_device_State[slot].Enqueue(omptarget_nvptx_threadPrivateContext);
}
}
|
2734b1984823387362b36958772d0ca7d707bc1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void replaceNaNKernel( int numberEntriesPerInstance, int numberIterations, float* source, float* destination) {
int instanceIndex = blockIdx.x;
int instanceStart = instanceIndex * numberEntriesPerInstance;
int startWithinThread = instanceStart + blockIdx.y * blockDim.x * numberIterations + threadIdx.x * numberIterations;
for(int index = startWithinThread; index < min(startWithinThread + numberIterations, instanceStart + numberEntriesPerInstance); index++) {
float currentValue = source[index];
destination[index] = isnan(currentValue) ? 0.0f : currentValue;
}
} | 2734b1984823387362b36958772d0ca7d707bc1c.cu | #include "includes.h"
__global__ void replaceNaNKernel( int numberEntriesPerInstance, int numberIterations, float* source, float* destination) {
int instanceIndex = blockIdx.x;
int instanceStart = instanceIndex * numberEntriesPerInstance;
int startWithinThread = instanceStart + blockIdx.y * blockDim.x * numberIterations + threadIdx.x * numberIterations;
for(int index = startWithinThread; index < min(startWithinThread + numberIterations, instanceStart + numberEntriesPerInstance); index++) {
float currentValue = source[index];
destination[index] = isnan(currentValue) ? 0.0f : currentValue;
}
} |
42ed3558bf14b4d2460aabdbe3e8f3ef500caa59.hip | // !!! This is a file automatically generated by hipify!!!
/* Compile: make
Execute: ./CNN
This is the main driver file; it calls the kernel functions that run on the device.
*/
#define USE_MNIST_LOADER
#define MNIST_DOUBLE
#include "mnist.h"
#include "layer.h"
#include <hip/hip_runtime.h>
#include <cstdio>
#include <time.h>
#define BLOCKSIZE 64
#define GRIDSIZE 64
static mnist_data *train_set, *test_set;
static unsigned int train_cnt, test_cnt;
/* Define layers of CNN*/
static Layer l_input = Layer(0, 0, 28*28);
static Layer l_c1 = Layer(5*5, 6, 24*24*6);//convolutional layer
static Layer l_s1 = Layer(4*4, 1, 6*6*6); //pooling
static Layer l_f = Layer(6*6*6, 10, 10);
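// Network shape implied by the sizes above: 28x28 input -> 5x5 convolution with
// 6 feature maps (24x24x6) -> 4x4 pooling (6x6x6) -> fully connected layer with 10 outputs.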
static void learn();
static unsigned int classify(double data[28][28]);
static void test();
static double forward_pass(double data[28][28]);
static double back_pass();
/*Load MNIST dataset */
static inline void loaddata()
{
mnist_load("data/train-images.idx3-ubyte", "data/train-labels.idx1-ubyte",
&train_set, &train_cnt);
mnist_load("data/t10k-images.idx3-ubyte", "data/t10k-labels.idx1-ubyte",
&test_set, &test_cnt);
}
int main(int argc, const char **argv)
{
srand(time(NULL));
hipError_t err = hipInit(0);
if (err != hipSuccess) {
fprintf(stderr, "CUDA initialisation failed with error code - %d\n", err);
return 1;
}
loaddata();
learn();
test();
return 0;
}
/* Forward propagation of a single row in dataset*/
static double forward_pass(double data[28][28])
{
float input[28][28];
for (int i = 0; i < 28; ++i) {
for (int j = 0; j < 28; ++j) {
input[i][j] = data[i][j];
}
}
// Initialize the values of the weights of all layers
l_input.clear();
l_c1.clear();
l_s1.clear();
l_f.clear();
clock_t start, end;
start = clock();
/*input data sent*/
int datasize[1] ;
datasize[0] = 28;
l_input.setOutput((float *)input,(int *)datasize);
// Call the kernel functions that compute the convolutional layer
hipLaunchKernelGGL(( fp_preact_c1), dim3(BLOCKSIZE), dim3(GRIDSIZE), 0, 0, (float*) l_input.output, (float*) l_c1.preact, (float*) l_c1.weight, l_c1.preactsize,28,5,6);
hipLaunchKernelGGL(( fp_bias_c1), dim3(BLOCKSIZE), dim3(GRIDSIZE), 0, 0, (float*)l_c1.preact, l_c1.bias,l_c1.preactsize, 6);
hipLaunchKernelGGL(( apply_step_function), dim3(BLOCKSIZE), dim3(GRIDSIZE), 0, 0, l_c1.preact, l_c1.output, l_c1.O);
// Call the kernel functions that compute the pooling layer
hipLaunchKernelGGL(( fp_preact_s1), dim3(BLOCKSIZE), dim3(GRIDSIZE), 0, 0, (float*)l_c1.output, (float*)l_s1.preact, (float*)l_s1.weight,l_s1.preactsize,l_c1.preactsize,l_c1.N,4);
hipLaunchKernelGGL(( fp_bias_s1), dim3(BLOCKSIZE), dim3(GRIDSIZE), 0, 0, (float*)l_s1.preact, l_s1.bias, l_s1.preactsize,l_c1.N);
hipLaunchKernelGGL(( apply_step_function), dim3(BLOCKSIZE), dim3(GRIDSIZE), 0, 0, l_s1.preact, l_s1.output, l_s1.O);
// Call the kernel functions that compute the dense layer and the output layer
hipLaunchKernelGGL(( fp_preact_f), dim3(BLOCKSIZE), dim3(GRIDSIZE), 0, 0, (float* )l_s1.output, l_f.preact, (float*)l_f.weight, l_s1.preactsize, l_c1.N,l_f.O);
hipLaunchKernelGGL(( fp_bias_f), dim3(BLOCKSIZE), dim3(GRIDSIZE), 0, 0, l_f.preact, l_f.bias,l_f.O);
hipLaunchKernelGGL(( apply_step_function), dim3(BLOCKSIZE), dim3(GRIDSIZE), 0, 0, l_f.preact, l_f.output, l_f.O);
end = clock();
return ((double) (end - start)) / CLOCKS_PER_SEC;
}
// Back propagation to update weights
static double back_pass()
{
clock_t start, end;
start = clock();
// Back propagation of the output layer
hipLaunchKernelGGL(( bp_weight_f), dim3(BLOCKSIZE), dim3(GRIDSIZE), 0, 0, (float*)l_f.d_weight, l_f.d_preact, (float*)l_s1.output, l_f.O, l_c1.N,l_s1.preactsize);
hipLaunchKernelGGL(( bp_bias_f), dim3(BLOCKSIZE), dim3(GRIDSIZE), 0, 0, l_f.bias, l_f.d_preact,l_f.O);
// Calculate the back propagation of the pooling layers
hipLaunchKernelGGL(( bp_output_s1), dim3(BLOCKSIZE), dim3(GRIDSIZE), 0, 0, (float*)l_s1.d_output, (float*)l_f.weight, l_f.d_preact, l_f.O,l_c1.N,l_s1.preactsize);
hipLaunchKernelGGL(( bp_preact_s1), dim3(BLOCKSIZE), dim3(GRIDSIZE), 0, 0, (float*)l_s1.d_preact, (float*)l_s1.d_output, (float*)l_s1.preact, l_c1.N,l_s1.preactsize);
hipLaunchKernelGGL(( bp_weight_s1), dim3(BLOCKSIZE), dim3(GRIDSIZE), 0, 0, (float*)l_s1.d_weight, (float*)l_s1.d_preact, (float*)l_c1.output, l_s1.N,4,l_c1.N,l_s1.preactsize,l_c1.preactsize);
hipLaunchKernelGGL(( bp_bias_s1), dim3(BLOCKSIZE), dim3(GRIDSIZE), 0, 0, l_s1.bias, (float*)l_s1.d_preact, l_c1.N, l_s1.preactsize);
// Calculate the back propagation of the convolutional layer and input layer
hipLaunchKernelGGL(( bp_output_c1), dim3(BLOCKSIZE), dim3(GRIDSIZE), 0, 0, (float*)l_c1.d_output, (float*)l_s1.weight, (float*)l_s1.d_preact, l_s1.N,4,l_c1.N,l_s1.preactsize,l_c1.preactsize);
hipLaunchKernelGGL(( bp_preact_c1), dim3(BLOCKSIZE), dim3(GRIDSIZE), 0, 0, (float*)l_c1.d_preact, (float*)l_c1.d_output, (float*)l_c1.preact,l_c1.N,l_c1.preactsize);
hipLaunchKernelGGL(( bp_weight_c1), dim3(BLOCKSIZE), dim3(GRIDSIZE), 0, 0, (float*)l_c1.d_weight, (float*)l_c1.d_preact, (float*)l_input.output,l_c1.N,5,l_c1.preactsize,l_input.preactsize);
hipLaunchKernelGGL(( bp_bias_c1), dim3(BLOCKSIZE), dim3(GRIDSIZE), 0, 0, l_c1.bias, (float*)l_c1.d_preact, l_c1.N, l_c1.preactsize);
// Update the weights of layers
hipLaunchKernelGGL(( apply_grad), dim3(BLOCKSIZE), dim3(GRIDSIZE), 0, 0, l_f.weight, l_f.d_weight, l_f.M * l_f.N);
hipLaunchKernelGGL(( apply_grad), dim3(BLOCKSIZE), dim3(GRIDSIZE), 0, 0, l_s1.weight, l_s1.d_weight, l_s1.M * l_s1.N);
hipLaunchKernelGGL(( apply_grad), dim3(BLOCKSIZE), dim3(GRIDSIZE), 0, 0, l_c1.weight, l_c1.d_weight, l_c1.M * l_c1.N);
end = clock();
return ((double) (end - start)) / CLOCKS_PER_SEC;
}
// Unfold the input layer
static void unfold_input(double input[28][28], double unfolded[24*24][5*5])
{
int a = 0;
(void)unfold_input;
for (int i = 0; i < 2; ++i)
for (int j = 0; j < 2; ++j) {
int b = 0;
for (int x = i; x < i + 2; ++x)
for (int y = j; y < j+2; ++y)
unfolded[a][b++] = input[x][y];
a++;
}
}
static void learn()
{
static hipblasHandle_t blas;
hipblasCreate(&blas);
float err;
//Define the maximum number of iterations
int iter = 50;
double time_taken = 0.0;
fprintf(stdout ,"Learning\n");
while (iter < 0 || iter-- > 0) {
err = 0.0f;
for (int i = 0; i < train_cnt; ++i) {
float tmp_err;
time_taken += forward_pass(train_set[i].data);
l_f.bp_clear();
l_s1.bp_clear();
l_c1.bp_clear();
// Euclidean distance of train_set[i]
hipLaunchKernelGGL(( makeError), dim3(10), dim3(1), 0, 0, l_f.d_preact, l_f.output, train_set[i].label, 10);
hipblasSnrm2(blas, 10, l_f.d_preact, 1, &tmp_err); //calculate the norm2
err += tmp_err;
time_taken += back_pass();
}
err /= train_cnt;
fprintf(stdout, "error: %e, time_on_gpu: %lf\n", err, time_taken);
if (err < threshold) {
fprintf(stdout, "Training complete, error less than threshold\n\n");
break;
}
}
fprintf(stdout, "\n Time - %lf\n", time_taken);
}
// Returns label of given data (0-9)
static unsigned int classify(double data[28][28])
{
float res[10];
forward_pass(data);
unsigned int max = 0;
hipMemcpy(res, l_f.output, sizeof(float) * 10, hipMemcpyDeviceToHost);
for (int i = 1; i < 10; ++i) {
if (res[max] < res[i]) {
max = i;
}
}
return max;
}
// Perform forward propagation of test data
static void test()
{
int error = 0;
for (int i = 0; i < test_cnt; ++i) {
if (classify(test_set[i].data) != test_set[i].label) {
++error;
}
}
fprintf(stdout, "Error Rate: %.2lf%%\n",
double(error) / double(test_cnt) * 100.0);
}
| 42ed3558bf14b4d2460aabdbe3e8f3ef500caa59.cu | /* Compile: make
Execute: ./CNN
This is the main driver file; it calls the kernel functions that run on the device.
*/
#define USE_MNIST_LOADER
#define MNIST_DOUBLE
#include "mnist.h"
#include "layer.h"
#include <cuda.h>
#include <cstdio>
#include <time.h>
#define BLOCKSIZE 64
#define GRIDSIZE 64
static mnist_data *train_set, *test_set;
static unsigned int train_cnt, test_cnt;
/* Define layers of CNN*/
static Layer l_input = Layer(0, 0, 28*28);
static Layer l_c1 = Layer(5*5, 6, 24*24*6);//convolutional layer
static Layer l_s1 = Layer(4*4, 1, 6*6*6); //pooling
static Layer l_f = Layer(6*6*6, 10, 10);
static void learn();
static unsigned int classify(double data[28][28]);
static void test();
static double forward_pass(double data[28][28]);
static double back_pass();
/*Load MNIST dataset */
static inline void loaddata()
{
mnist_load("data/train-images.idx3-ubyte", "data/train-labels.idx1-ubyte",
&train_set, &train_cnt);
mnist_load("data/t10k-images.idx3-ubyte", "data/t10k-labels.idx1-ubyte",
&test_set, &test_cnt);
}
int main(int argc, const char **argv)
{
srand(time(NULL));
CUresult err = cuInit(0);
if (err != CUDA_SUCCESS) {
fprintf(stderr, "CUDA initialisation failed with error code - %d\n", err);
return 1;
}
loaddata();
learn();
test();
return 0;
}
/* Forward propagation of a single row in dataset*/
static double forward_pass(double data[28][28])
{
float input[28][28];
for (int i = 0; i < 28; ++i) {
for (int j = 0; j < 28; ++j) {
input[i][j] = data[i][j];
}
}
// Initialize the values of the weights of all layers
l_input.clear();
l_c1.clear();
l_s1.clear();
l_f.clear();
clock_t start, end;
start = clock();
/*input data sent*/
int datasize[1] ;
datasize[0] = 28;
l_input.setOutput((float *)input,(int *)datasize);
// Call the kernel functions that compute the convolutional layer
fp_preact_c1<<<BLOCKSIZE, GRIDSIZE>>>((float*) l_input.output, (float*) l_c1.preact, (float*) l_c1.weight, l_c1.preactsize,28,5,6);
fp_bias_c1<<<BLOCKSIZE, GRIDSIZE>>>((float*)l_c1.preact, l_c1.bias,l_c1.preactsize, 6);
apply_step_function<<<BLOCKSIZE, GRIDSIZE>>>(l_c1.preact, l_c1.output, l_c1.O);
// Call the kernel functions that compute the pooling layer
fp_preact_s1<<<BLOCKSIZE, GRIDSIZE>>>((float*)l_c1.output, (float*)l_s1.preact, (float*)l_s1.weight,l_s1.preactsize,l_c1.preactsize,l_c1.N,4);
fp_bias_s1<<<BLOCKSIZE, GRIDSIZE>>>((float*)l_s1.preact, l_s1.bias, l_s1.preactsize,l_c1.N);
apply_step_function<<<BLOCKSIZE, GRIDSIZE>>>(l_s1.preact, l_s1.output, l_s1.O);
// Call the kernel functions that compute the dense layer and the output layer
fp_preact_f<<<BLOCKSIZE, GRIDSIZE>>>((float* )l_s1.output, l_f.preact, (float*)l_f.weight, l_s1.preactsize, l_c1.N,l_f.O);
fp_bias_f<<<BLOCKSIZE, GRIDSIZE>>>(l_f.preact, l_f.bias,l_f.O);
apply_step_function<<<BLOCKSIZE, GRIDSIZE>>>(l_f.preact, l_f.output, l_f.O);
end = clock();
return ((double) (end - start)) / CLOCKS_PER_SEC;
}
// Back propagation to update weights
static double back_pass()
{
clock_t start, end;
start = clock();
// Back propagation of the output layer
bp_weight_f<<<BLOCKSIZE, GRIDSIZE>>>((float*)l_f.d_weight, l_f.d_preact, (float*)l_s1.output, l_f.O, l_c1.N,l_s1.preactsize);
bp_bias_f<<<BLOCKSIZE, GRIDSIZE>>>(l_f.bias, l_f.d_preact,l_f.O);
// Calculate the back propagation of the pooling layers
bp_output_s1<<<BLOCKSIZE, GRIDSIZE>>>((float*)l_s1.d_output, (float*)l_f.weight, l_f.d_preact, l_f.O,l_c1.N,l_s1.preactsize);
bp_preact_s1<<<BLOCKSIZE, GRIDSIZE>>>((float*)l_s1.d_preact, (float*)l_s1.d_output, (float*)l_s1.preact, l_c1.N,l_s1.preactsize);
bp_weight_s1<<<BLOCKSIZE, GRIDSIZE>>>((float*)l_s1.d_weight, (float*)l_s1.d_preact, (float*)l_c1.output, l_s1.N,4,l_c1.N,l_s1.preactsize,l_c1.preactsize);
bp_bias_s1<<<BLOCKSIZE, GRIDSIZE>>>(l_s1.bias, (float*)l_s1.d_preact, l_c1.N, l_s1.preactsize);
// Calculate the back propagation of the convolutional layer and input layer
bp_output_c1<<<BLOCKSIZE, GRIDSIZE>>>((float*)l_c1.d_output, (float*)l_s1.weight, (float*)l_s1.d_preact, l_s1.N,4,l_c1.N,l_s1.preactsize,l_c1.preactsize);
bp_preact_c1<<<BLOCKSIZE, GRIDSIZE>>>((float*)l_c1.d_preact, (float*)l_c1.d_output, (float*)l_c1.preact,l_c1.N,l_c1.preactsize);
bp_weight_c1<<<BLOCKSIZE, GRIDSIZE>>>((float*)l_c1.d_weight, (float*)l_c1.d_preact, (float*)l_input.output,l_c1.N,5,l_c1.preactsize,l_input.preactsize);
bp_bias_c1<<<BLOCKSIZE, GRIDSIZE>>>(l_c1.bias, (float*)l_c1.d_preact, l_c1.N, l_c1.preactsize);
// Update the weights of layers
apply_grad<<<BLOCKSIZE, GRIDSIZE>>>(l_f.weight, l_f.d_weight, l_f.M * l_f.N);
apply_grad<<<BLOCKSIZE, GRIDSIZE>>>(l_s1.weight, l_s1.d_weight, l_s1.M * l_s1.N);
apply_grad<<<BLOCKSIZE, GRIDSIZE>>>(l_c1.weight, l_c1.d_weight, l_c1.M * l_c1.N);
end = clock();
return ((double) (end - start)) / CLOCKS_PER_SEC;
}
// Unfold the input layer
static void unfold_input(double input[28][28], double unfolded[24*24][5*5])
{
int a = 0;
(void)unfold_input;
for (int i = 0; i < 2; ++i)
for (int j = 0; j < 2; ++j) {
int b = 0;
for (int x = i; x < i + 2; ++x)
for (int y = j; y < j+2; ++y)
unfolded[a][b++] = input[x][y];
a++;
}
}
static void learn()
{
static cublasHandle_t blas;
cublasCreate(&blas);
float err;
//Define the maximum number of iterations
int iter = 50;
double time_taken = 0.0;
fprintf(stdout ,"Learning\n");
while (iter < 0 || iter-- > 0) {
err = 0.0f;
for (int i = 0; i < train_cnt; ++i) {
float tmp_err;
time_taken += forward_pass(train_set[i].data);
l_f.bp_clear();
l_s1.bp_clear();
l_c1.bp_clear();
// Euclidean distance of train_set[i]
makeError<<<10, 1>>>(l_f.d_preact, l_f.output, train_set[i].label, 10);
cublasSnrm2(blas, 10, l_f.d_preact, 1, &tmp_err); //calculate the norm2
err += tmp_err;
time_taken += back_pass();
}
err /= train_cnt;
fprintf(stdout, "error: %e, time_on_gpu: %lf\n", err, time_taken);
if (err < threshold) {
fprintf(stdout, "Training complete, error less than threshold\n\n");
break;
}
}
fprintf(stdout, "\n Time - %lf\n", time_taken);
}
// Returns label of given data (0-9)
static unsigned int classify(double data[28][28])
{
float res[10];
forward_pass(data);
unsigned int max = 0;
cudaMemcpy(res, l_f.output, sizeof(float) * 10, cudaMemcpyDeviceToHost);
for (int i = 1; i < 10; ++i) {
if (res[max] < res[i]) {
max = i;
}
}
return max;
}
// Perform forward propagation of test data
static void test()
{
int error = 0;
for (int i = 0; i < test_cnt; ++i) {
if (classify(test_set[i].data) != test_set[i].label) {
++error;
}
}
fprintf(stdout, "Error Rate: %.2lf%%\n",
double(error) / double(test_cnt) * 100.0);
}
|
71f59fb5fc354db1b53b8ec1ef7d189459cb640f.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <cmath>
using namespace std;
const int TILE_WIDTH = 16;
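// Tiled multiplication: each thread block computes one TILE_WIDTH x TILE_WIDTH tile of
// the output, staging the matching tiles of M and N in shared memory so that each
// global element is loaded once per tile rather than once per output element.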
__global__ void MatrixMulKernel(int *d_M,int *d_N,int *d_P,int m,int n,int k)
{
__shared__ int ds_M[TILE_WIDTH][TILE_WIDTH];
__shared__ int ds_N[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
//Identify the row and column of the Pd element to work on
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
int pValue = 0;
//loop over the Md and Nd tiles required to compute the Pd element
for(int t = 0; t < (n-1) / TILE_WIDTH + 1; ++t)
{
if(row < m && t * TILE_WIDTH + tx < n)
ds_M[ty][tx] = d_M[row * n + t * TILE_WIDTH + tx];
else
ds_M[ty][tx] = 0;
if(col < k && t * TILE_WIDTH + ty < n)
ds_N[ty][tx] = d_N[(t * TILE_WIDTH + ty) * k + col];
else
ds_N[ty][tx] = 0;
__syncthreads();
for(int i = 0; i < TILE_WIDTH; ++i)
pValue += ds_M[ty][i] * ds_N[i][tx];
__syncthreads();
}
if(row < m && col < k)
d_P[row * k + col] = pValue;
}
int main()
{
//freopen("out","w",stdout);
int m = 600, n = 700, k = 1000;
int *h_M, *h_N, *d_M, *d_N;
int *h_P, *d_P;
size_t sizeM = m * n * sizeof(int);
size_t sizeN = n * k * sizeof(int);
size_t sizeP = m * k * sizeof(int);
h_M = (int *) malloc(sizeM);
h_N = (int *) malloc(sizeN);
h_P = (int *) malloc(sizeP);
hipMalloc(&d_M,sizeM);
hipMalloc(&d_N,sizeN);
hipMalloc(&d_P,sizeP);
for(int i = 0; i < m * n; ++i)
{
if(i % 2 == 0)
h_M[i] = 1;
else
h_M[i] = 0;
}
for(int i = 0;i < n * k; ++i)
{
if(i % 2 == 0)
h_N[i] = 0;
else
h_N[i] = 1;
}
hipMemcpy(d_M,h_M,sizeM,hipMemcpyHostToDevice);
hipMemcpy(d_N,h_N,sizeN,hipMemcpyHostToDevice);
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
dim3 grid((int)ceil(k*1.0 / TILE_WIDTH), (int)ceil(m*1.0/ TILE_WIDTH));
dim3 block(TILE_WIDTH,TILE_WIDTH);
hipLaunchKernelGGL(( MatrixMulKernel), dim3(grid),dim3(block), 0, 0, d_M,d_N,d_P,m,n,k);
hipEventRecord(stop,0);
//hipDeviceSynchronize();
hipEventSynchronize(stop);
float ElapsedTime;
hipEventElapsedTime(&ElapsedTime,start,stop);
printf("Kernel Elpased Time: %.3f ms\n",ElapsedTime);
hipMemcpy(h_P,d_P,sizeP,hipMemcpyDeviceToHost);
/*
for(int i = 0; i < m * k; ++i)
printf("%d\n",h_P[i]);
printf("\n");
*/
return 0;
}
| 71f59fb5fc354db1b53b8ec1ef7d189459cb640f.cu | #include <iostream>
#include <cstdio>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cmath>
using namespace std;
const int TILE_WIDTH = 16;
__global__ void MatrixMulKernel(int *d_M,int *d_N,int *d_P,int m,int n,int k)
{
__shared__ int ds_M[TILE_WIDTH][TILE_WIDTH];
__shared__ int ds_N[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
//Identify the row and column of the Pd element to work on
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
int pValue = 0;
//loop over the Md and Nd tiles required to compute the Pd element
for(int t = 0; t < (n-1) / TILE_WIDTH + 1; ++t)
{
if(row < m && t * TILE_WIDTH + tx < n)
ds_M[ty][tx] = d_M[row * n + t * TILE_WIDTH + tx];
else
ds_M[ty][tx] = 0;
if(col < k && t * TILE_WIDTH + ty < n)
ds_N[ty][tx] = d_N[(t * TILE_WIDTH + ty) * k + col];
else
ds_N[ty][tx] = 0;
__syncthreads();
for(int i = 0; i < TILE_WIDTH; ++i)
pValue += ds_M[ty][i] * ds_N[i][tx];
__syncthreads();
}
if(row < m && col < k)
d_P[row * k + col] = pValue;
}
int main()
{
//freopen("out","w",stdout);
int m = 600, n = 700, k = 1000;
int *h_M, *h_N, *d_M, *d_N;
int *h_P, *d_P;
size_t sizeM = m * n * sizeof(int);
size_t sizeN = n * k * sizeof(int);
size_t sizeP = m * k * sizeof(int);
h_M = (int *) malloc(sizeM);
h_N = (int *) malloc(sizeN);
h_P = (int *) malloc(sizeP);
cudaMalloc(&d_M,sizeM);
cudaMalloc(&d_N,sizeN);
cudaMalloc(&d_P,sizeP);
for(int i = 0; i < m * n; ++i)
{
if(i % 2 == 0)
h_M[i] = 1;
else
h_M[i] = 0;
}
for(int i = 0;i < n * k; ++i)
{
if(i % 2 == 0)
h_N[i] = 0;
else
h_N[i] = 1;
}
cudaMemcpy(d_M,h_M,sizeM,cudaMemcpyHostToDevice);
cudaMemcpy(d_N,h_N,sizeN,cudaMemcpyHostToDevice);
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
dim3 grid((int)ceil(k*1.0 / TILE_WIDTH), (int)ceil(m*1.0/ TILE_WIDTH));
dim3 block(TILE_WIDTH,TILE_WIDTH);
MatrixMulKernel<<<grid,block>>>(d_M,d_N,d_P,m,n,k);
cudaEventRecord(stop,0);
//cudaDeviceSynchronize();
cudaEventSynchronize(stop);
float ElapsedTime;
cudaEventElapsedTime(&ElapsedTime,start,stop);
printf("Kernel Elpased Time: %.3f ms\n",ElapsedTime);
cudaMemcpy(h_P,d_P,sizeP,cudaMemcpyDeviceToHost);
/*
for(int i = 0; i < m * k; ++i)
printf("%d\n",h_P[i]);
printf("\n");
*/
return 0;
}
|
c5d4061dc48b748547153bc0ccdcf9068b56c1e2.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
#define USING_SHARED_MEMORY 1
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
#if USING_SHARED_MEMORY
const int blockSize_sharedMemory = 128;
const int logNumBanks = 5;
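// Shared-memory indices are padded by CONFLICT_FREE_OFFSET (one extra slot for every
// 2^logNumBanks = 32 entries) so the strided accesses of the scan do not all land in
// the same shared-memory bank.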
#define CONFLICT_FREE_OFFSET(n) ((n) >> logNumBanks)
__global__ void kernScanPerBlock(int n, int *dev_data, int *dev_blockSum) {
if (n == 1) {
dev_data[0] = 0;
return;
}
if (threadIdx.x >= n / 2) {
return;
}
__shared__ int temp[2 * blockSize_sharedMemory + CONFLICT_FREE_OFFSET(2 * blockSize_sharedMemory)];
dev_data += blockDim.x * blockIdx.x * 2;
if (n > blockDim.x * 2) {
n = blockDim.x * 2;
}
int i = threadIdx.x;
int j = threadIdx.x + n / 2;
int ti = i + CONFLICT_FREE_OFFSET(i);
int tj = j + CONFLICT_FREE_OFFSET(j);
temp[ti] = dev_data[i];
temp[tj] = dev_data[j];
int lastElement = 0;
if (dev_blockSum && threadIdx.x == blockDim.x - 1) {
lastElement = temp[tj];
}
int offset = 1;
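// Up-sweep (reduce) phase: build partial sums in place, doubling the stride each pass.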
for (int d = n >> 1; d > 0; d >>= 1) {
__syncthreads();
if (threadIdx.x < d) {
int i = offset * (2 * threadIdx.x + 1) - 1;
int j = offset * (2 * threadIdx.x + 2) - 1;
i += CONFLICT_FREE_OFFSET(i);
j += CONFLICT_FREE_OFFSET(j);
temp[j] += temp[i];
}
offset *= 2;
}
if (threadIdx.x == 0) {
temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)] = 0;
}
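// Down-sweep phase: with the last element cleared, push the partial sums back down the
// tree to produce an exclusive prefix sum of the block.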
for (int d = 1; d < n; d *= 2) {
offset >>= 1;
__syncthreads();
if (threadIdx.x < d) {
int i = offset * (2 * threadIdx.x + 1) - 1;
int j = offset * (2 * threadIdx.x + 2) - 1;
i += CONFLICT_FREE_OFFSET(i);
j += CONFLICT_FREE_OFFSET(j);
int t = temp[i];
temp[i] = temp[j];
temp[j] += t;
}
}
__syncthreads();
dev_data[i] = temp[ti];
dev_data[j] = temp[tj];
if (dev_blockSum && threadIdx.x == blockDim.x - 1) {
dev_blockSum[blockIdx.x] = lastElement + temp[tj];
}
}
__global__ void kernAddPerBlock(int *dev_data, int *dev_add) {
int blockSum = dev_add[blockIdx.x];
dev_data += blockIdx.x * blockDim.x * 2;
dev_data[threadIdx.x] += blockSum;
dev_data[threadIdx.x + blockDim.x] += blockSum;
}
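// scanHelper scans each block of up to 2 * blockSize_sharedMemory elements on-chip,
// recursively scans the per-block sums, then adds each block's scanned sum back onto
// its elements to stitch the blocks together.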
void scanHelper(int size, int *dev_data) {
if (size > 2 * blockSize_sharedMemory) {
int blocks = size / (2 * blockSize_sharedMemory);
int *dev_blockSum;
hipMalloc((void**) &dev_blockSum, blocks * sizeof(int));
hipLaunchKernelGGL(( kernScanPerBlock), dim3(blocks), dim3(blockSize_sharedMemory), 0, 0, size, dev_data, dev_blockSum);
scanHelper(blocks, dev_blockSum);
hipLaunchKernelGGL(( kernAddPerBlock), dim3(blocks), dim3(blockSize_sharedMemory), 0, 0, dev_data, dev_blockSum);
hipFree(dev_blockSum);
} else {
hipLaunchKernelGGL(( kernScanPerBlock), dim3(1), dim3(blockSize_sharedMemory), 0, 0, size, dev_data, nullptr);
}
}
#else
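// Fallback path without shared memory: the up-sweep and down-sweep run in global
// memory, with one kernel launch per level of the tree.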
__global__ void kernScanUpSweepPhase(int threads, int *dev_temp, int offset) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= threads) {
return;
}
index = (index + 1) * offset * 2 - 1;
dev_temp[index] += dev_temp[index - offset];
}
__global__ void kernScanDownSweepPhase(int threads, int *dev_temp, int offset) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= threads) {
return;
}
index = (index + 1) * offset * 2 - 1;
int t = dev_temp[index - offset];
dev_temp[index - offset] = dev_temp[index];
dev_temp[index] += t;
}
void scanHelper(int size, int *dev_temp) {
int threads = size / 2;
int offset = 1;
for (; threads > 0; threads /= 2, offset *= 2) {
dim3 blocks((threads + blockSize - 1) / blockSize);
hipLaunchKernelGGL(( kernScanUpSweepPhase), dim3(blocks), dim3(blockSize), 0, 0, threads, dev_temp, offset);
}
hipMemset(dev_temp + size - 1, 0, sizeof(int));
threads = 1;
offset = size / 2;
for (; offset > 0; offset /= 2, threads *= 2) {
dim3 blocks((threads + blockSize - 1) / blockSize);
hipLaunchKernelGGL(( kernScanDownSweepPhase), dim3(blocks), dim3(blockSize), 0, 0, threads, dev_temp, offset);
}
}
#endif
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
int size = 1 << ilog2ceil(n);
int *dev_temp;
hipMalloc((void**) &dev_temp, size * sizeof(int));
hipMemcpy(dev_temp, idata, n * sizeof(int), hipMemcpyHostToDevice);
hipMemset(dev_temp + n, 0, (size - n) * sizeof(int));
timer().startGpuTimer();
scanHelper(size, dev_temp);
timer().endGpuTimer();
hipMemcpy(odata, dev_temp, n * sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_temp);
}
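// A minimal usage sketch (illustrative only): scan() performs an exclusive prefix
// sum and pads the device buffer to the next power of two internally, so n does not
// need to be a power of two. For example:
// int idata[4] = {3, 1, 7, 0};
// int odata[4];
// StreamCompaction::Efficient::scan(4, odata, idata); // odata == {0, 3, 4, 11}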
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
dim3 blocks((n + blockSize - 1) / blockSize);
int size = 1 << ilog2ceil(n);
int *dev_idata, *dev_odata, *dev_bools, *dev_indices;
hipMalloc((void**) &dev_idata, n * sizeof(int));
hipMalloc((void**) &dev_odata, n * sizeof(int));
hipMalloc((void**) &dev_bools, n * sizeof(int));
hipMalloc((void**) &dev_indices, size * sizeof(int));
hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyHostToDevice);
hipMemset(dev_indices + n, 0, (size - n) * sizeof(int));
timer().startGpuTimer();
hipLaunchKernelGGL(( StreamCompaction::Common::kernMapToBoolean), dim3(blocks), dim3(blockSize), 0, 0, n, dev_bools, dev_idata);
hipMemcpy(dev_indices, dev_bools, n * sizeof(int), hipMemcpyDeviceToDevice);
scanHelper(size, dev_indices);
hipLaunchKernelGGL(( StreamCompaction::Common::kernScatter), dim3(blocks), dim3(blockSize), 0, 0, n, dev_odata, dev_idata, dev_bools, dev_indices);
timer().endGpuTimer();
int lastBool, lastIdx;
hipMemcpy(odata, dev_odata, n * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(&lastBool, dev_bools + n - 1, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(&lastIdx, dev_indices + n - 1, sizeof(int), hipMemcpyDeviceToHost);
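// With an exclusive scan, the number of surviving elements equals the scanned value
// at the last slot plus that slot's boolean flag, i.e. lastBool + lastIdx below.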
hipFree(dev_idata);
hipFree(dev_odata);
hipFree(dev_bools);
hipFree(dev_indices);
return lastBool + lastIdx;
}
__global__ void kernBitKNegative(int n, int *dev_idata, int *dev_bools, int bitK) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
dev_bools[index] = (dev_idata[index] & (1 << bitK)) != 0 ? 0 : 1;
}
__global__ void kernSplit(int n, int *dev_idata, int *dev_odata, int *dev_scan, int bitK, int totalFalses) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int data = dev_idata[index];
int scanIdx = dev_scan[index];
if ((data & (1 << bitK)) == 0) {
dev_odata[scanIdx] = data;
} else {
dev_odata[index - scanIdx + totalFalses] = data;
}
}
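// kernSplit is the radix-sort "split" primitive: dev_scan holds the exclusive prefix
// sum of the "bit k is zero" flags, so zero-bit elements scatter stably to the front
// (position scanIdx) and one-bit elements go to index - scanIdx + totalFalses,
// preserving relative order within each group.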
void radixSort(int n, int *odata, const int *idata) {
dim3 blocks((n + blockSize - 1) / blockSize);
int size = 1 << ilog2ceil(n);
int maxNum = 0;
for (int i = 0; i < n; ++i) {
maxNum = ::max(maxNum, idata[i]);
}
int maxBit = ilog2ceil(maxNum);
int *dev_data1, *dev_data2, *dev_scan;
hipMalloc((void**) &dev_data1, n * sizeof(int));
hipMalloc((void**) &dev_data2, n * sizeof(int));
hipMalloc((void**) &dev_scan, size * sizeof(int));
hipMemcpy(dev_data1, idata, n * sizeof(int), hipMemcpyHostToDevice);
for (int i = 0; i <= maxBit; ++i) {
int lastBool, lastScan;
hipLaunchKernelGGL(( kernBitKNegative), dim3(blocks), dim3(blockSize), 0, 0, n, dev_data1, dev_scan, i);
hipMemcpy(&lastBool, dev_scan + n - 1, sizeof(int), hipMemcpyDeviceToHost);
hipMemset(dev_scan + n, 0, (size - n) * sizeof(int));
scanHelper(size, dev_scan);
hipMemcpy(&lastScan, dev_scan + n - 1, sizeof(int), hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( kernSplit), dim3(blocks), dim3(blockSize), 0, 0, n, dev_data1, dev_data2, dev_scan, i, lastBool + lastScan);
std::swap(dev_data1, dev_data2);
}
hipMemcpy(odata, dev_data1, n * sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_data1);
hipFree(dev_data2);
hipFree(dev_scan);
}
}
}
| c5d4061dc48b748547153bc0ccdcf9068b56c1e2.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
#define USING_SHARED_MEMORY 1
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
#if USING_SHARED_MEMORY
const int blockSize_sharedMemory = 128;
const int logNumBanks = 5;
#define CONFLICT_FREE_OFFSET(n) ((n) >> logNumBanks)
__global__ void kernScanPerBlock(int n, int *dev_data, int *dev_blockSum) {
if (n == 1) {
dev_data[0] = 0;
return;
}
if (threadIdx.x >= n / 2) {
return;
}
__shared__ int temp[2 * blockSize_sharedMemory + CONFLICT_FREE_OFFSET(2 * blockSize_sharedMemory)];
dev_data += blockDim.x * blockIdx.x * 2;
if (n > blockDim.x * 2) {
n = blockDim.x * 2;
}
int i = threadIdx.x;
int j = threadIdx.x + n / 2;
int ti = i + CONFLICT_FREE_OFFSET(i);
int tj = j + CONFLICT_FREE_OFFSET(j);
temp[ti] = dev_data[i];
temp[tj] = dev_data[j];
int lastElement = 0;
if (dev_blockSum && threadIdx.x == blockDim.x - 1) {
lastElement = temp[tj];
}
int offset = 1;
for (int d = n >> 1; d > 0; d >>= 1) {
__syncthreads();
if (threadIdx.x < d) {
int i = offset * (2 * threadIdx.x + 1) - 1;
int j = offset * (2 * threadIdx.x + 2) - 1;
i += CONFLICT_FREE_OFFSET(i);
j += CONFLICT_FREE_OFFSET(j);
temp[j] += temp[i];
}
offset *= 2;
}
if (threadIdx.x == 0) {
temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)] = 0;
}
for (int d = 1; d < n; d *= 2) {
offset >>= 1;
__syncthreads();
if (threadIdx.x < d) {
int i = offset * (2 * threadIdx.x + 1) - 1;
int j = offset * (2 * threadIdx.x + 2) - 1;
i += CONFLICT_FREE_OFFSET(i);
j += CONFLICT_FREE_OFFSET(j);
int t = temp[i];
temp[i] = temp[j];
temp[j] += t;
}
}
__syncthreads();
dev_data[i] = temp[ti];
dev_data[j] = temp[tj];
if (dev_blockSum && threadIdx.x == blockDim.x - 1) {
dev_blockSum[blockIdx.x] = lastElement + temp[tj];
}
}
__global__ void kernAddPerBlock(int *dev_data, int *dev_add) {
int blockSum = dev_add[blockIdx.x];
dev_data += blockIdx.x * blockDim.x * 2;
dev_data[threadIdx.x] += blockSum;
dev_data[threadIdx.x + blockDim.x] += blockSum;
}
void scanHelper(int size, int *dev_data) {
if (size > 2 * blockSize_sharedMemory) {
int blocks = size / (2 * blockSize_sharedMemory);
int *dev_blockSum;
cudaMalloc((void**) &dev_blockSum, blocks * sizeof(int));
kernScanPerBlock<<<blocks, blockSize_sharedMemory>>>(size, dev_data, dev_blockSum);
scanHelper(blocks, dev_blockSum);
kernAddPerBlock<<<blocks, blockSize_sharedMemory>>>(dev_data, dev_blockSum);
cudaFree(dev_blockSum);
} else {
kernScanPerBlock<<<1, blockSize_sharedMemory>>>(size, dev_data, nullptr);
}
}
#else
__global__ void kernScanUpSweepPhase(int threads, int *dev_temp, int offset) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= threads) {
return;
}
index = (index + 1) * offset * 2 - 1;
dev_temp[index] += dev_temp[index - offset];
}
__global__ void kernScanDownSweepPhase(int threads, int *dev_temp, int offset) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= threads) {
return;
}
index = (index + 1) * offset * 2 - 1;
int t = dev_temp[index - offset];
dev_temp[index - offset] = dev_temp[index];
dev_temp[index] += t;
}
void scanHelper(int size, int *dev_temp) {
int threads = size / 2;
int offset = 1;
for (; threads > 0; threads /= 2, offset *= 2) {
dim3 blocks((threads + blockSize - 1) / blockSize);
kernScanUpSweepPhase<<<blocks, blockSize>>>(threads, dev_temp, offset);
}
cudaMemset(dev_temp + size - 1, 0, sizeof(int));
threads = 1;
offset = size / 2;
for (; offset > 0; offset /= 2, threads *= 2) {
dim3 blocks((threads + blockSize - 1) / blockSize);
kernScanDownSweepPhase<<<blocks, blockSize>>>(threads, dev_temp, offset);
}
}
#endif
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
int size = 1 << ilog2ceil(n);
int *dev_temp;
cudaMalloc((void**) &dev_temp, size * sizeof(int));
cudaMemcpy(dev_temp, idata, n * sizeof(int), cudaMemcpyHostToDevice);
cudaMemset(dev_temp + n, 0, (size - n) * sizeof(int));
timer().startGpuTimer();
scanHelper(size, dev_temp);
timer().endGpuTimer();
cudaMemcpy(odata, dev_temp, n * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_temp);
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
dim3 blocks((n + blockSize - 1) / blockSize);
int size = 1 << ilog2ceil(n);
int *dev_idata, *dev_odata, *dev_bools, *dev_indices;
cudaMalloc((void**) &dev_idata, n * sizeof(int));
cudaMalloc((void**) &dev_odata, n * sizeof(int));
cudaMalloc((void**) &dev_bools, n * sizeof(int));
cudaMalloc((void**) &dev_indices, size * sizeof(int));
cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
cudaMemset(dev_indices + n, 0, (size - n) * sizeof(int));
timer().startGpuTimer();
StreamCompaction::Common::kernMapToBoolean<<<blocks, blockSize>>>(n, dev_bools, dev_idata);
cudaMemcpy(dev_indices, dev_bools, n * sizeof(int), cudaMemcpyDeviceToDevice);
scanHelper(size, dev_indices);
StreamCompaction::Common::kernScatter<<<blocks, blockSize>>>(n, dev_odata, dev_idata, dev_bools, dev_indices);
timer().endGpuTimer();
int lastBool, lastIdx;
cudaMemcpy(odata, dev_odata, n * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&lastBool, dev_bools + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&lastIdx, dev_indices + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_idata);
cudaFree(dev_odata);
cudaFree(dev_bools);
cudaFree(dev_indices);
return lastBool + lastIdx;
}
__global__ void kernBitKNegative(int n, int *dev_idata, int *dev_bools, int bitK) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
dev_bools[index] = (dev_idata[index] & (1 << bitK)) != 0 ? 0 : 1;
}
__global__ void kernSplit(int n, int *dev_idata, int *dev_odata, int *dev_scan, int bitK, int totalFalses) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int data = dev_idata[index];
int scanIdx = dev_scan[index];
if ((data & (1 << bitK)) == 0) {
dev_odata[scanIdx] = data;
} else {
dev_odata[index - scanIdx + totalFalses] = data;
}
}
void radixSort(int n, int *odata, const int *idata) {
dim3 blocks((n + blockSize - 1) / blockSize);
int size = 1 << ilog2ceil(n);
int maxNum = 0;
for (int i = 0; i < n; ++i) {
maxNum = std::max(maxNum, idata[i]);
}
int maxBit = ilog2ceil(maxNum);
int *dev_data1, *dev_data2, *dev_scan;
cudaMalloc((void**) &dev_data1, n * sizeof(int));
cudaMalloc((void**) &dev_data2, n * sizeof(int));
cudaMalloc((void**) &dev_scan, size * sizeof(int));
cudaMemcpy(dev_data1, idata, n * sizeof(int), cudaMemcpyHostToDevice);
for (int i = 0; i <= maxBit; ++i) {
int lastBool, lastScan;
kernBitKNegative<<<blocks, blockSize>>>(n, dev_data1, dev_scan, i);
cudaMemcpy(&lastBool, dev_scan + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemset(dev_scan + n, 0, (size - n) * sizeof(int));
scanHelper(size, dev_scan);
cudaMemcpy(&lastScan, dev_scan + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
kernSplit<<<blocks, blockSize>>>(n, dev_data1, dev_data2, dev_scan, i, lastBool + lastScan);
std::swap(dev_data1, dev_data2);
}
cudaMemcpy(odata, dev_data1, n * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_data1);
cudaFree(dev_data2);
cudaFree(dev_scan);
}
}
}
|
fe5d4d54652032bb064f5bd5893e38a101958c36.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/reverse.h>
#include <thrust/scan.h>
#include "paddle/fluid/operators/masked_select_op.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using DDim = framework::DDim;
__global__ void SetMaskArray(const bool* mask, int32_t* mask_array, int size) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (; idx < size; idx += blockDim.x * gridDim.x) {
if (mask[idx])
mask_array[idx] = 1;
else
mask_array[idx] = 0;
}
}
template <typename T>
__global__ void SelectWithPrefixMask(const int32_t* mask_prefix_sum,
const bool* mask, const T* input, T* out,
int size) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (; idx < size; idx += blockDim.x * gridDim.x) {
if (mask[idx]) {
int index = mask_prefix_sum[idx];
out[index] = input[idx];
}
}
}
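// The scatter above uses the exclusive prefix sum of the mask as the output slot,
// e.g. mask {1,0,1,1} yields prefix sums {0,1,1,2}, so the kept inputs land
// contiguously at out[0], out[1], out[2]. The gradient kernel below performs the
// matching gather and zero-fills masked-out positions.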
template <typename T>
__global__ void SelectGradWithPrefixMask(const int32_t* mask_prefix_sum,
const bool* mask, const T* input,
T* out, int size) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (; idx < size; idx += blockDim.x * gridDim.x) {
if (mask[idx]) {
int index = mask_prefix_sum[idx];
out[idx] = input[index];
} else {
out[idx] = 0;
}
}
}
template <typename DeviceContext, typename T>
class MaskedSelectCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const {
auto input = ctx.Input<framework::Tensor>("X");
auto mask = ctx.Input<framework::Tensor>("Mask");
auto out = ctx.Output<framework::Tensor>("Y");
auto* mask_data = mask->data<bool>();
auto input_data = input->data<T>();
auto mask_size = mask->numel();
auto input_dim = input->dims();
auto mask_dim = mask->dims();
PADDLE_ENFORCE_EQ(
input_dim, mask_dim,
platform::errors::InvalidArgument(
"The dim size of input and mask in OP(masked_selected) "
"must be equal, but got input dim:(%ld), mask dim: "
"(%ld). Please check input "
"value.",
input_dim, mask_dim));
thrust::device_ptr<const bool> mask_dev_ptr =
thrust::device_pointer_cast(mask_data);
thrust::device_vector<T> mask_vec(mask_dev_ptr, mask_dev_ptr + mask_size);
auto out_size = thrust::count(mask_vec.begin(), mask_vec.end(), true);
framework::DDim out_dim{out_size};
out->Resize(out_dim);
auto out_data = out->mutable_data<T>(ctx.GetPlace());
Tensor mask_array;
Tensor mask_prefix_sum;
mask_array.Resize(mask_dim);
mask_prefix_sum.Resize(mask_dim);
int32_t* mask_array_data = mask_array.mutable_data<int32_t>(ctx.GetPlace());
int32_t* mask_prefix_sum_data =
mask_prefix_sum.mutable_data<int32_t>(ctx.GetPlace());
int threads = 512;
int grid = (mask_size + threads - 1) / threads;
auto stream = ctx.cuda_device_context().stream();
hipLaunchKernelGGL(( SetMaskArray), dim3(grid), dim3(threads), 0, stream, mask_data, mask_array_data,
mask_size);
thrust::device_ptr<int32_t> mask_array_dev_ptr =
thrust::device_pointer_cast(mask_array_data);
thrust::device_vector<int32_t> mask_array_vec(
mask_array_dev_ptr, mask_array_dev_ptr + mask_size);
thrust::exclusive_scan(thrust::device, mask_array_vec.begin(),
mask_array_vec.end(), mask_prefix_sum_data);
hipLaunchKernelGGL(( SelectWithPrefixMask<T>), dim3(grid), dim3(threads), 0, stream,
mask_prefix_sum_data, mask_data, input_data, out_data, mask_size);
}
};
template <typename DeviceContext, typename T>
class MaskedSelectGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const {
auto input = ctx.Input<framework::Tensor>(framework::GradVarName("Y"));
auto mask = ctx.Input<framework::Tensor>("Mask");
auto out = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
auto* mask_data = mask->data<bool>();
auto* input_data = input->data<T>();
auto* out_data = out->mutable_data<T>(ctx.GetPlace());
auto input_size = input->numel();
auto mask_size = mask->numel();
auto mask_dim = mask->dims();
auto out_size = mask_size;
Tensor mask_array;
Tensor mask_prefix_sum;
mask_array.Resize(mask_dim);
mask_prefix_sum.Resize(mask_dim);
int32_t* mask_array_data = mask_array.mutable_data<int32_t>(ctx.GetPlace());
int32_t* mask_prefix_sum_data =
mask_prefix_sum.mutable_data<int32_t>(ctx.GetPlace());
int threads = 512;
int grid = (mask_size + threads - 1) / threads;
auto stream = ctx.cuda_device_context().stream();
hipLaunchKernelGGL(( SetMaskArray), dim3(grid), dim3(threads), 0, stream, mask_data, mask_array_data,
mask_size);
thrust::device_ptr<int32_t> mask_array_dev_ptr =
thrust::device_pointer_cast(mask_array_data);
thrust::device_vector<int32_t> mask_array_vec(
mask_array_dev_ptr, mask_array_dev_ptr + mask_size);
thrust::exclusive_scan(thrust::device, mask_array_vec.begin(),
mask_array_vec.end(), mask_prefix_sum_data);
hipLaunchKernelGGL(( SelectGradWithPrefixMask<T>), dim3(grid), dim3(threads), 0, stream,
mask_prefix_sum_data, mask_data, input_data, out_data, mask_size);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
masked_select,
ops::MaskedSelectCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::MaskedSelectCUDAKernel<paddle::platform::CUDADeviceContext, double>,
ops::MaskedSelectCUDAKernel<paddle::platform::CUDADeviceContext, int>,
ops::MaskedSelectCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>);
REGISTER_OP_CUDA_KERNEL(
masked_select_grad,
ops::MaskedSelectGradCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::MaskedSelectGradCUDAKernel<paddle::platform::CUDADeviceContext,
double>,
ops::MaskedSelectGradCUDAKernel<paddle::platform::CUDADeviceContext, int>,
ops::MaskedSelectGradCUDAKernel<paddle::platform::CUDADeviceContext,
int64_t>);
| fe5d4d54652032bb064f5bd5893e38a101958c36.cu | /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/reverse.h>
#include <thrust/scan.h>
#include "paddle/fluid/operators/masked_select_op.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using DDim = framework::DDim;
__global__ void SetMaskArray(const bool* mask, int32_t* mask_array, int size) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (; idx < size; idx += blockDim.x * gridDim.x) {
if (mask[idx])
mask_array[idx] = 1;
else
mask_array[idx] = 0;
}
}
template <typename T>
__global__ void SelectWithPrefixMask(const int32_t* mask_prefix_sum,
const bool* mask, const T* input, T* out,
int size) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (; idx < size; idx += blockDim.x * gridDim.x) {
if (mask[idx]) {
int index = mask_prefix_sum[idx];
out[index] = input[idx];
}
}
}
template <typename T>
__global__ void SelectGradWithPrefixMask(const int32_t* mask_prefix_sum,
const bool* mask, const T* input,
T* out, int size) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (; idx < size; idx += blockDim.x * gridDim.x) {
if (mask[idx]) {
int index = mask_prefix_sum[idx];
out[idx] = input[index];
} else {
out[idx] = 0;
}
}
}
template <typename DeviceContext, typename T>
class MaskedSelectCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const {
auto input = ctx.Input<framework::Tensor>("X");
auto mask = ctx.Input<framework::Tensor>("Mask");
auto out = ctx.Output<framework::Tensor>("Y");
auto* mask_data = mask->data<bool>();
auto input_data = input->data<T>();
auto mask_size = mask->numel();
auto input_dim = input->dims();
auto mask_dim = mask->dims();
PADDLE_ENFORCE_EQ(
input_dim, mask_dim,
platform::errors::InvalidArgument(
"The dim size of input and mask in OP(masked_selected) "
"must be equal, but got input dim:(%ld), mask dim: "
"(%ld). Please check input "
"value.",
input_dim, mask_dim));
thrust::device_ptr<const bool> mask_dev_ptr =
thrust::device_pointer_cast(mask_data);
thrust::device_vector<T> mask_vec(mask_dev_ptr, mask_dev_ptr + mask_size);
auto out_size = thrust::count(mask_vec.begin(), mask_vec.end(), true);
framework::DDim out_dim{out_size};
out->Resize(out_dim);
auto out_data = out->mutable_data<T>(ctx.GetPlace());
Tensor mask_array;
Tensor mask_prefix_sum;
mask_array.Resize(mask_dim);
mask_prefix_sum.Resize(mask_dim);
int32_t* mask_array_data = mask_array.mutable_data<int32_t>(ctx.GetPlace());
int32_t* mask_prefix_sum_data =
mask_prefix_sum.mutable_data<int32_t>(ctx.GetPlace());
int threads = 512;
int grid = (mask_size + threads - 1) / threads;
auto stream = ctx.cuda_device_context().stream();
SetMaskArray<<<grid, threads, 0, stream>>>(mask_data, mask_array_data,
mask_size);
thrust::device_ptr<int32_t> mask_array_dev_ptr =
thrust::device_pointer_cast(mask_array_data);
thrust::device_vector<int32_t> mask_array_vec(
mask_array_dev_ptr, mask_array_dev_ptr + mask_size);
thrust::exclusive_scan(thrust::device, mask_array_vec.begin(),
mask_array_vec.end(), mask_prefix_sum_data);
SelectWithPrefixMask<T><<<grid, threads, 0, stream>>>(
mask_prefix_sum_data, mask_data, input_data, out_data, mask_size);
}
};
template <typename DeviceContext, typename T>
class MaskedSelectGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const {
auto input = ctx.Input<framework::Tensor>(framework::GradVarName("Y"));
auto mask = ctx.Input<framework::Tensor>("Mask");
auto out = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
auto* mask_data = mask->data<bool>();
auto* input_data = input->data<T>();
auto* out_data = out->mutable_data<T>(ctx.GetPlace());
auto input_size = input->numel();
auto mask_size = mask->numel();
auto mask_dim = mask->dims();
auto out_size = mask_size;
Tensor mask_array;
Tensor mask_prefix_sum;
mask_array.Resize(mask_dim);
mask_prefix_sum.Resize(mask_dim);
int32_t* mask_array_data = mask_array.mutable_data<int32_t>(ctx.GetPlace());
int32_t* mask_prefix_sum_data =
mask_prefix_sum.mutable_data<int32_t>(ctx.GetPlace());
int threads = 512;
int grid = (mask_size + threads - 1) / threads;
auto stream = ctx.cuda_device_context().stream();
SetMaskArray<<<grid, threads, 0, stream>>>(mask_data, mask_array_data,
mask_size);
thrust::device_ptr<int32_t> mask_array_dev_ptr =
thrust::device_pointer_cast(mask_array_data);
thrust::device_vector<int32_t> mask_array_vec(
mask_array_dev_ptr, mask_array_dev_ptr + mask_size);
thrust::exclusive_scan(thrust::device, mask_array_vec.begin(),
mask_array_vec.end(), mask_prefix_sum_data);
SelectGradWithPrefixMask<T><<<grid, threads, 0, stream>>>(
mask_prefix_sum_data, mask_data, input_data, out_data, mask_size);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
masked_select,
ops::MaskedSelectCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::MaskedSelectCUDAKernel<paddle::platform::CUDADeviceContext, double>,
ops::MaskedSelectCUDAKernel<paddle::platform::CUDADeviceContext, int>,
ops::MaskedSelectCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>);
REGISTER_OP_CUDA_KERNEL(
masked_select_grad,
ops::MaskedSelectGradCUDAKernel<paddle::platform::CUDADeviceContext, float>,
ops::MaskedSelectGradCUDAKernel<paddle::platform::CUDADeviceContext,
double>,
ops::MaskedSelectGradCUDAKernel<paddle::platform::CUDADeviceContext, int>,
ops::MaskedSelectGradCUDAKernel<paddle::platform::CUDADeviceContext,
int64_t>);
|
47bd5764a14b37ce7d63cca370e63eeac8abe6d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "rsbench_hip.cuh"
////////////////////////////////////////////////////////////////////////////////////
// BASELINE FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////
// All "baseline" code is at the top of this file. The baseline code is a simple
// implementation of the algorithm, with only minor GPU optimizations in place.
// Following these functions are a number of optimized variants,
// which each deploy a different combination of optimization strategies. By
// default, RSBench will only run the baseline implementation. Optimized variants
// must be specifically selected using the "-k <optimized variant ID>" command
// line argument.
////////////////////////////////////////////////////////////////////////////////////
template<typename... Args>
__device__
void __enzyme_autodiff(void*, Args...);
__device__ int enzyme_dup, enzyme_const, enzyme_active;
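// These markers are interpreted by the Enzyme autodiff plugin: enzyme_dup pairs a
// primal argument with a shadow buffer that carries derivatives (macro_xs /
// d_macro_xs in the kernel below), while enzyme_const marks arguments that are not
// differentiated.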
void run_event_based_simulation(Input input, SimulationData GSD, unsigned long * vhash_result )
{
////////////////////////////////////////////////////////////////////////////////
// Configure & Launch Simulation Kernel
////////////////////////////////////////////////////////////////////////////////
printf("Running baseline event-based simulation on device...\n");
int nthreads = 32;
int nblocks = 1;//ceil( (double) input.lookups / 32.0);
hipLaunchKernelGGL(( xs_lookup_kernel_baseline), dim3(nblocks), dim3(nthreads), 0, 0, input, GSD );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
////////////////////////////////////////////////////////////////////////////////
// Reduce Verification Results
////////////////////////////////////////////////////////////////////////////////
printf("Reducing verification results...\n");
unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + input.lookups, 0);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
*vhash_result = verification_scalar;
}
// In this kernel, we perform a single lookup with each thread. Threads within a warp
// do not really have any relation to each other, and divergence due to high nuclide count fuel
// material lookups are costly. This kernel constitutes baseline performance.
__global__ void xs_lookup_kernel_baseline(Input in, SimulationData GSD )
{
// The lookup ID. Used to set the seed, and to store the verification value
const int i = blockIdx.x *blockDim.x + threadIdx.x;
if( i >= in.lookups )
return;
// Set the initial seed value
uint64_t seed = STARTING_SEED;
// Forward seed to lookup index (we need 2 samples per lookup)
seed = fast_forward_LCG(seed, 2*i);
// Randomly pick an energy and material for the particle
double E = LCG_random_double(&seed);
int mat = pick_mat(&seed);
double macro_xs[4] = {0};
double d_macro_xs[4] = {1.0};
#if 0
calculate_macro_xs( macro_xs, mat, E, in, GSD.num_nucs, GSD.mats, GSD.max_num_nucs, GSD.concs, GSD.n_windows, GSD.pseudo_K0RS, GSD.windows, GSD.poles, GSD.max_num_windows, GSD.max_num_poles );
#else
__enzyme_autodiff((void*)calculate_macro_xs,
enzyme_dup, macro_xs, d_macro_xs,
enzyme_const, mat,
enzyme_const, E,
enzyme_const, in,
enzyme_const, GSD.num_nucs,
enzyme_const, GSD.mats,
enzyme_const, GSD.max_num_nucs,
enzyme_const, GSD.concs,
enzyme_const, GSD.n_windows,
enzyme_const, GSD.pseudo_K0RS,
enzyme_const, GSD.windows,
enzyme_const, GSD.poles,
//enzyme_dup, GSD.poles, GSD.d_poles,
enzyme_const, GSD.max_num_windows,
enzyme_const, GSD.max_num_poles
);
#endif
// For verification, and to prevent the compiler from optimizing
// all work out, we interrogate the returned macro_xs array
// to find its maximum value index, then increment the verification
// value by that index. In this implementation, we write to a global
// verification array that will get reduced after this kernel completes.
double max = -DBL_MAX;
int max_idx = 0;
for(int x = 0; x < 4; x++ )
{
if( macro_xs[x] > max )
{
max = macro_xs[x];
max_idx = x;
}
}
GSD.verification[i] = max_idx+1;
}
__attribute__((noinline))
__device__ void body( int i, double * __restrict__ macro_xs, int mat, double E, const Input& __restrict__ input, int * __restrict__ num_nucs, int * __restrict__ mats, int max_num_nucs, double * __restrict__ concs, int * __restrict__ n_windows, double * __restrict__ pseudo_K0Rs, Window * __restrict__ windows, Pole * __restrict__ poles, int max_num_windows, int max_num_poles ) {
double micro_xs[4];
int nuc = mats[mat * max_num_nucs + i];
if( input.doppler == 1 )
calculate_micro_xs_doppler( micro_xs, nuc, E, input, n_windows, pseudo_K0Rs, windows, poles, max_num_windows, max_num_poles);
else
calculate_micro_xs( micro_xs, nuc, E, input, n_windows, pseudo_K0Rs, windows, poles, max_num_windows, max_num_poles);
for( int j = 0; j < 4; j++ )
{
macro_xs[j] += micro_xs[j] * concs[mat * max_num_nucs + i];
}
}
__device__ void calculate_macro_xs( double * __restrict__ macro_xs, int mat, double E, Input input, int * __restrict__ num_nucs, int * __restrict__ mats, int max_num_nucs, double * __restrict__ concs, int * __restrict__ n_windows, double * __restrict__ pseudo_K0Rs, Window * __restrict__ windows, Pole * __restrict__ poles, int max_num_windows, int max_num_poles )
{
// zero out macro vector
for( int i = 0; i < 4; i++ )
macro_xs[i] = 0;
// for nuclide in mat
for( int i = 0; i < num_nucs[mat]; i++ )
{
body(i, macro_xs, mat, E, input, num_nucs, mats, max_num_nucs, concs, n_windows, pseudo_K0Rs, windows, poles, max_num_windows, max_num_poles);
/*
double micro_xs[4];
int nuc = mats[mat * max_num_nucs + i];
if( input.doppler == 1 )
calculate_micro_xs_doppler( micro_xs, nuc, E, input, n_windows, pseudo_K0Rs, windows, poles, max_num_windows, max_num_poles);
else
calculate_micro_xs( micro_xs, nuc, E, input, n_windows, pseudo_K0Rs, windows, poles, max_num_windows, max_num_poles);
for( int j = 0; j < 4; j++ )
{
macro_xs[j] += micro_xs[j] * concs[mat * max_num_nucs + i];
}
*/
// Debug
/*
printf("E = %.2lf, mat = %d, macro_xs[0] = %.2lf, macro_xs[1] = %.2lf, macro_xs[2] = %.2lf, macro_xs[3] = %.2lf\n",
E, mat, macro_xs[0], macro_xs[1], macro_xs[2], macro_xs[3] );
*/
}
// Debug
/*
printf("E = %.2lf, mat = %d, macro_xs[0] = %.2lf, macro_xs[1] = %.2lf, macro_xs[2] = %.2lf, macro_xs[3] = %.2lf\n",
E, mat, macro_xs[0], macro_xs[1], macro_xs[2], macro_xs[3] );
*/
}
// No Temperature dependence (i.e., 0K evaluation)
__attribute__((always_inline))
__device__ void calculate_micro_xs( double * micro_xs, int nuc, double E, Input input, int * n_windows, double * pseudo_K0RS, Window * windows, Pole * poles, int max_num_windows, int max_num_poles)
{
// MicroScopic XS's to Calculate
double sigT;
double sigA;
double sigF;
double sigE;
// Calculate Window Index
double spacing = 1.0 / n_windows[nuc];
int window = (int) ( E / spacing );
if( window == n_windows[nuc] )
window--;
// Calculate sigTfactors
RSComplex sigTfactors[4]; // Of length input.numL, which is always 4
calculate_sig_T(nuc, E, input, pseudo_K0RS, sigTfactors );
// Calculate contributions from window "background" (i.e., poles outside window, pre-calculated)
Window w = windows[nuc * max_num_windows + window];
sigT = E * w.T;
sigA = E * w.A;
sigF = E * w.F;
// Loop over Poles within window, add contributions
for( int i = w.start; i < w.end; i++ )
{
RSComplex PSIIKI;
RSComplex CDUM;
Pole pole = poles[nuc * max_num_poles + i];
RSComplex t1 = {0, 1};
RSComplex t2 = {sqrt(E), 0 };
PSIIKI = c_div( t1 , c_sub(pole.MP_EA,t2) );
RSComplex E_c = {E, 0};
CDUM = c_div(PSIIKI, E_c);
sigT += (c_mul(pole.MP_RT, c_mul(CDUM, sigTfactors[pole.l_value])) ).r;
sigA += (c_mul( pole.MP_RA, CDUM)).r;
sigF += (c_mul(pole.MP_RF, CDUM)).r;
}
sigE = sigT - sigA;
micro_xs[0] = sigT;
micro_xs[1] = sigA;
micro_xs[2] = sigF;
micro_xs[3] = sigE;
}
// Temperature Dependent Variation of Kernel
// (This involves using the Complex Faddeeva function to
// Doppler broaden the poles within the window)
__attribute__((always_inline))
__device__ void calculate_micro_xs_doppler( double * micro_xs, int nuc, double E, Input input, int * n_windows, double * pseudo_K0RS, Window * windows, Pole * poles, int max_num_windows, int max_num_poles )
{
// MicroScopic XS's to Calculate
double sigT;
double sigA;
double sigF;
double sigE;
// Calculate Window Index
double spacing = 1.0 / n_windows[nuc];
int window = (int) ( E / spacing );
if( window == n_windows[nuc] )
window--;
// Calculate sigTfactors
RSComplex sigTfactors[4]; // Of length input.numL, which is always 4
calculate_sig_T(nuc, E, input, pseudo_K0RS, sigTfactors );
// Calculate contributions from window "background" (i.e., poles outside window, pre-calculated)
Window w = windows[nuc * max_num_windows + window];
sigT = E * w.T;
sigA = E * w.A;
sigF = E * w.F;
double dopp = 0.5;
// Loop over Poles within window, add contributions
for( int i = w.start; i < w.end; i++ )
{
Pole pole = poles[nuc * max_num_poles + i];
// Prep Z
RSComplex E_c = {E, 0};
RSComplex dopp_c = {dopp, 0};
RSComplex Z = c_mul(c_sub(E_c, pole.MP_EA), dopp_c);
// Evaluate Fadeeva Function
RSComplex faddeeva = fast_nuclear_W( Z );
// Update W
sigT += (c_mul( pole.MP_RT, c_mul(faddeeva, sigTfactors[pole.l_value]) )).r;
sigA += (c_mul( pole.MP_RA , faddeeva)).r;
sigF += (c_mul( pole.MP_RF , faddeeva)).r;
}
sigE = sigT - sigA;
micro_xs[0] = sigT;
micro_xs[1] = sigA;
micro_xs[2] = sigF;
micro_xs[3] = sigE;
}
// picks a material based on a probabilistic distribution
__device__ int pick_mat( uint64_t * seed )
{
// I have a nice spreadsheet supporting these numbers. They are
// the fractions (by volume) of material in the core. Not a
// *perfect* approximation of where XS lookups are going to occur,
// but this will do a good job of biasing the system nonetheless.
double dist[12];
dist[0] = 0.140; // fuel
dist[1] = 0.052; // cladding
dist[2] = 0.275; // cold, borated water
dist[3] = 0.134; // hot, borated water
dist[4] = 0.154; // RPV
dist[5] = 0.064; // Lower, radial reflector
dist[6] = 0.066; // Upper reflector / top plate
dist[7] = 0.055; // bottom plate
dist[8] = 0.008; // bottom nozzle
dist[9] = 0.015; // top nozzle
dist[10] = 0.025; // top of fuel assemblies
dist[11] = 0.013; // bottom of fuel assemblies
double roll = LCG_random_double(seed);
// makes a pick based on the distro
for( int i = 0; i < 12; i++ )
{
double running = 0;
for( int j = i; j > 0; j-- )
running += dist[j];
if( roll < running )
return i;
}
return 0;
}
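// Note: the running sum for index i covers dist[1..i], so rolls that land in the
// remaining ~14% fall through to the final "return 0" and select the fuel material,
// which is roughly the dist[0] = 0.140 fraction.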
__device__ void calculate_sig_T( int nuc, double E, Input input, double * pseudo_K0RS, RSComplex * sigTfactors )
{
double phi;
#pragma unroll
for( int i = 0; i < 4; i++ )
{
phi = pseudo_K0RS[nuc * input.numL + i] * sqrt(E);
if( i == 1 )
phi -= - atan( phi );
else if( i == 2 )
phi -= atan( 3.0 * phi / (3.0 - phi*phi));
else if( i == 3 )
phi -= atan(phi*(15.0-phi*phi)/(15.0-6.0*phi*phi));
phi *= 2.0;
sigTfactors[i].r = cos(phi);
sigTfactors[i].i = -sin(phi);
}
}
// This function uses a combination of the Abrarov Approximation
// and the QUICK_W three term asymptotic expansion.
// Only expected to use Abrarov ~0.5% of the time.
__device__ RSComplex fast_nuclear_W( RSComplex Z )
{
// Abrarov
if( c_abs(Z) < 6.0 )
{
// Precomputed parts for speeding things up
// (N = 10, Tm = 12.0)
RSComplex prefactor = {0, 8.124330e+01};
double an[10] = {
2.758402e-01,
2.245740e-01,
1.594149e-01,
9.866577e-02,
5.324414e-02,
2.505215e-02,
1.027747e-02,
3.676164e-03,
1.146494e-03,
3.117570e-04
};
double neg_1n[10] = {
-1.0,
1.0,
-1.0,
1.0,
-1.0,
1.0,
-1.0,
1.0,
-1.0,
1.0
};
double denominator_left[10] = {
9.869604e+00,
3.947842e+01,
8.882644e+01,
1.579137e+02,
2.467401e+02,
3.553058e+02,
4.836106e+02,
6.316547e+02,
7.994380e+02,
9.869604e+02
};
RSComplex t1 = {0, 12};
RSComplex t2 = {12, 0};
RSComplex i = {0,1};
RSComplex one = {1, 0};
RSComplex W = c_div(c_mul(i, ( c_sub(one, fast_cexp(c_mul(t1, Z))) )) , c_mul(t2, Z));
RSComplex sum = {0,0};
for( int n = 0; n < 10; n++ )
{
RSComplex t3 = {neg_1n[n], 0};
RSComplex top = c_sub(c_mul(t3, fast_cexp(c_mul(t1, Z))), one);
RSComplex t4 = {denominator_left[n], 0};
RSComplex t5 = {144, 0};
RSComplex bot = c_sub(t4, c_mul(t5,c_mul(Z,Z)));
RSComplex t6 = {an[n], 0};
sum = c_add(sum, c_mul(t6, c_div(top,bot)));
}
W = c_add(W, c_mul(prefactor, c_mul(Z, sum)));
return W;
}
else
{
// QUICK_2 3 Term Asymptotic Expansion (Accurate to O(1e-6)).
// Pre-computed parameters
RSComplex a = {0.512424224754768462984202823134979415014943561548661637413182,0};
RSComplex b = {0.275255128608410950901357962647054304017026259671664935783653, 0};
RSComplex c = {0.051765358792987823963876628425793170829107067780337219430904, 0};
RSComplex d = {2.724744871391589049098642037352945695982973740328335064216346, 0};
RSComplex i = {0,1};
RSComplex Z2 = c_mul(Z, Z);
// Three Term Asymptotic Expansion
RSComplex W = c_mul(c_mul(Z,i), (c_add(c_div(a,(c_sub(Z2, b))) , c_div(c,(c_sub(Z2, d))))));
return W;
}
}
__host__ __device__ double LCG_random_double(uint64_t * seed)
{
const uint64_t m = 9223372036854775808ULL; // 2^63
const uint64_t a = 2806196910506780709ULL;
const uint64_t c = 1ULL;
*seed = (a * (*seed) + c) % m;
return (double) (*seed) / (double) m;
}
__host__ __device__ uint64_t LCG_random_int(uint64_t * seed)
{
const uint64_t m = 9223372036854775808ULL; // 2^63
const uint64_t a = 2806196910506780709ULL;
const uint64_t c = 1ULL;
*seed = (a * (*seed) + c) % m;
return *seed;
}
__device__ uint64_t fast_forward_LCG(uint64_t seed, uint64_t n)
{
const uint64_t m = 9223372036854775808ULL; // 2^63
uint64_t a = 2806196910506780709ULL;
uint64_t c = 1ULL;
n = n % m;
uint64_t a_new = 1;
uint64_t c_new = 0;
while(n > 0)
{
if(n & 1)
{
a_new *= a;
c_new = c_new * a + c;
}
c *= (a + 1);
a *= a;
n >>= 1;
}
return (a_new * seed + c_new) % m;
}
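// fast_forward_LCG skips ahead n states in O(log n) via binary exponentiation of the
// LCG, so the result matches n sequential draws. Illustrative check:
// uint64_t s1 = STARTING_SEED, s2 = STARTING_SEED;
// for (int k = 0; k < 7; k++) LCG_random_int(&s1);
// s2 = fast_forward_LCG(s2, 7); // s1 == s2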
// Complex arithmetic functions
__device__ RSComplex c_add( RSComplex A, RSComplex B)
{
RSComplex C;
C.r = A.r + B.r;
C.i = A.i + B.i;
return C;
}
__device__ RSComplex c_sub( RSComplex A, RSComplex B)
{
RSComplex C;
C.r = A.r - B.r;
C.i = A.i - B.i;
return C;
}
__host__ __device__ RSComplex c_mul( RSComplex A, RSComplex B)
{
double a = A.r;
double b = A.i;
double c = B.r;
double d = B.i;
RSComplex C;
C.r = (a*c) - (b*d);
C.i = (a*d) + (b*c);
return C;
}
__device__ RSComplex c_div( RSComplex A, RSComplex B)
{
double a = A.r;
double b = A.i;
double c = B.r;
double d = B.i;
RSComplex C;
double denom = c*c + d*d;
C.r = ( (a*c) + (b*d) ) / denom;
C.i = ( (b*c) - (a*d) ) / denom;
return C;
}
__device__ double c_abs( RSComplex A)
{
return sqrt(A.r*A.r + A.i * A.i);
}
// Fast (but inaccurate) exponential function
// Written By "ACMer":
// https://codingforspeed.com/using-faster-exponential-approximation/
// We use our own to avoid small differences in compiler specific
// exp() intrinsic implementations that make it difficult to verify
// if the code is working correctly or not.
__device__ double fast_exp(double x)
{
x = 1.0 + x * 0.000244140625;
x *= x; x *= x; x *= x; x *= x;
x *= x; x *= x; x *= x; x *= x;
x *= x; x *= x; x *= x; x *= x;
return x;
}
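// fast_exp approximates e^x as (1 + x/4096)^4096: 0.000244140625 is 1/4096, and the
// twelve squarings above raise the intermediate result to the 4096th power.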
// Implementation based on:
// z = x + iy
// cexp(z) = e^x * (cos(y) + i * sin(y))
__device__ RSComplex fast_cexp( RSComplex z )
{
double x = z.r;
double y = z.i;
// For consistency across architectures, we
// will use our own exponetial implementation
//double t1 = exp(x);
double t1 = fast_exp(x);
double t2 = cos(y);
double t3 = sin(y);
RSComplex t4 = {t2, t3};
RSComplex t5 = {t1, 0};
RSComplex result = c_mul(t5, (t4));
return result;
}
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
// OPTIMIZED VARIANT FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////
// This section contains a number of optimized variants of some of the above
// functions, which each deploy a different combination of optimization strategies
// specific to GPU. By default, RSBench will not run any of these variants. They
// must be specifically selected using the "-k <optimized variant ID>" command
// line argument.
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
// Optimization 6 -- Kernel Splitting + All Material Lookups + Full Sort
// + Energy Sort
////////////////////////////////////////////////////////////////////////////////////
// This optimization builds on optimization 4, adding in a second sort by energy.
// It is extremely fast, as now most of the threads within a warp will be hitting
// the same indices in the lookup grids. This greatly reduces thread divergence and
// greatly improves cache efficiency and re-use.
//
// However, it is unlikely that this exact optimization would be possible in a real
// application like OpenMC. One major difference is that particle objects are quite
// large, often having 50+ variable fields, such that sorting them in memory becomes
// rather expensive. Instead, the best possible option would probably be to create
// intermediate indexing (per Hamilton et. al 2019), and run the kernels indirectly.
////////////////////////////////////////////////////////////////////////////////////
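// In code, the pipeline below is: sample an energy and material per lookup, count
// the lookups per material with thrust::count, sort the samples by material and then
// by energy within each material, launch one xs-lookup kernel per material over its
// contiguous slice, and finally reduce the verification array.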
__global__ void sampling_kernel(Input in, SimulationData GSD )
{
// The lookup ID.
const int i = blockIdx.x *blockDim.x + threadIdx.x;
if( i >= in.lookups )
return;
// Set the initial seed value
uint64_t seed = STARTING_SEED;
// Forward seed to lookup index (we need 2 samples per lookup)
seed = fast_forward_LCG(seed, 2*i);
// Randomly pick an energy and material for the particle
double p_energy = LCG_random_double(&seed);
int mat = pick_mat(&seed);
// Store sample data in state array
GSD.p_energy_samples[i] = p_energy;
GSD.mat_samples[i] = mat;
}
__global__ void xs_lookup_kernel_optimization_1(Input in, SimulationData GSD, int m, int n_lookups, int offset )
{
// The lookup ID. Used to set the seed, and to store the verification value
int i = blockIdx.x *blockDim.x + threadIdx.x;
if( i >= n_lookups )
return;
i += offset;
// Check that our material type matches the kernel material
int mat = GSD.mat_samples[i];
if( mat != m )
return;
double macro_xs[4] = {0};
calculate_macro_xs( macro_xs, mat, GSD.p_energy_samples[i], in, GSD.num_nucs, GSD.mats, GSD.max_num_nucs, GSD.concs, GSD.n_windows, GSD.pseudo_K0RS, GSD.windows, GSD.poles, GSD.max_num_windows, GSD.max_num_poles );
// For verification, and to prevent the compiler from optimizing
// all work out, we interrogate the returned macro_xs array
// to find its maximum value index, then increment the verification
// value by that index. In this implementation, we write to a global
// verification array that will get reduced after this kernel completes.
double max = -DBL_MAX;
int max_idx = 0;
for(int x = 0; x < 4; x++ )
{
if( macro_xs[x] > max )
{
max = macro_xs[x];
max_idx = x;
}
}
GSD.verification[i] = max_idx+1;
}
void run_event_based_simulation_optimization_1(Input in, SimulationData GSD, unsigned long * vhash_result)
{
const char * optimization_name = "Optimization 1 - Material & Energy Sorts + Material-specific Kernels";
printf("Simulation Kernel:\"%s\"\n", optimization_name);
////////////////////////////////////////////////////////////////////////////////
// Allocate Additional Data Structures Needed by Optimized Kernel
////////////////////////////////////////////////////////////////////////////////
printf("Allocating additional device data required by kernel...\n");
size_t sz;
size_t total_sz = 0;
sz = in.lookups * sizeof(double);
gpuErrchk( hipMalloc((void **) &GSD.p_energy_samples, sz) );
total_sz += sz;
GSD.length_p_energy_samples = in.lookups;
sz = in.lookups * sizeof(int);
gpuErrchk( hipMalloc((void **) &GSD.mat_samples, sz) );
total_sz += sz;
GSD.length_mat_samples = in.lookups;
printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0);
////////////////////////////////////////////////////////////////////////////////
// Configure & Launch Simulation Kernel
////////////////////////////////////////////////////////////////////////////////
printf("Beginning optimized simulation...\n");
int nthreads = 32;
int nblocks = ceil( (double) in.lookups / 32.0);
hipLaunchKernelGGL(( sampling_kernel), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
// Count the number of lookups that need to be performed for each material
int n_lookups_per_material[12];
for( int m = 0; m < 12; m++ )
n_lookups_per_material[m] = thrust::count(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, m);
// Sort by material first
thrust::sort_by_key(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, GSD.p_energy_samples);
// Now, sort each material by energy
int offset = 0;
for( int m = 0; m < 12; m++ )
{
thrust::sort_by_key(thrust::device, GSD.p_energy_samples + offset, GSD.p_energy_samples + offset + n_lookups_per_material[m], GSD.mat_samples + offset);
offset += n_lookups_per_material[m];
}
// Launch all material kernels individually
offset = 0;
for( int m = 0; m < 12; m++ )
{
nthreads = 32;
nblocks = ceil((double) n_lookups_per_material[m] / (double) nthreads);
hipLaunchKernelGGL(( xs_lookup_kernel_optimization_1), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD, m, n_lookups_per_material[m], offset );
offset += n_lookups_per_material[m];
}
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
////////////////////////////////////////////////////////////////////////////////
// Reduce Verification Results
////////////////////////////////////////////////////////////////////////////////
printf("Reducing verification results...\n");
unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
*vhash_result = verification_scalar;
}
| 47bd5764a14b37ce7d63cca370e63eeac8abe6d5.cu | #include "rsbench.cuh"
////////////////////////////////////////////////////////////////////////////////////
// BASELINE FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////
// All "baseline" code is at the top of this file. The baseline code is a simple
// implementation of the algorithm, with only minor GPU optimizations in place.
// Following these functions are a number of optimized variants,
// which each deploy a different combination of optimization strategies. By
// default, RSBench will only run the baseline implementation. Optimized variants
// must be specifically selected using the "-k <optimized variant ID>" command
// line argument.
////////////////////////////////////////////////////////////////////////////////////
template<typename... Args>
__device__
void __enzyme_autodiff(void*, Args...);
__device__ int enzyme_dup, enzyme_const, enzyme_active;
void run_event_based_simulation(Input input, SimulationData GSD, unsigned long * vhash_result )
{
////////////////////////////////////////////////////////////////////////////////
// Configure & Launch Simulation Kernel
////////////////////////////////////////////////////////////////////////////////
printf("Running baseline event-based simulation on device...\n");
int nthreads = 32;
int nblocks = 1;//ceil( (double) input.lookups / 32.0);
xs_lookup_kernel_baseline<<<nblocks, nthreads>>>( input, GSD );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
////////////////////////////////////////////////////////////////////////////////
// Reduce Verification Results
////////////////////////////////////////////////////////////////////////////////
printf("Reducing verification results...\n");
unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + input.lookups, 0);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
*vhash_result = verification_scalar;
}
// In this kernel, we perform a single lookup with each thread. Threads within a warp
// do not really have any relation to each other, and divergence due to high nuclide count fuel
// material lookups are costly. This kernel constitutes baseline performance.
__global__ void xs_lookup_kernel_baseline(Input in, SimulationData GSD )
{
// The lookup ID. Used to set the seed, and to store the verification value
const int i = blockIdx.x *blockDim.x + threadIdx.x;
if( i >= in.lookups )
return;
// Set the initial seed value
uint64_t seed = STARTING_SEED;
// Forward seed to lookup index (we need 2 samples per lookup)
seed = fast_forward_LCG(seed, 2*i);
// Randomly pick an energy and material for the particle
double E = LCG_random_double(&seed);
int mat = pick_mat(&seed);
double macro_xs[4] = {0};
double d_macro_xs[4] = {1.0};
#if 0
calculate_macro_xs( macro_xs, mat, E, in, GSD.num_nucs, GSD.mats, GSD.max_num_nucs, GSD.concs, GSD.n_windows, GSD.pseudo_K0RS, GSD.windows, GSD.poles, GSD.max_num_windows, GSD.max_num_poles );
#else
__enzyme_autodiff((void*)calculate_macro_xs,
enzyme_dup, macro_xs, d_macro_xs,
enzyme_const, mat,
enzyme_const, E,
enzyme_const, in,
enzyme_const, GSD.num_nucs,
enzyme_const, GSD.mats,
enzyme_const, GSD.max_num_nucs,
enzyme_const, GSD.concs,
enzyme_const, GSD.n_windows,
enzyme_const, GSD.pseudo_K0RS,
enzyme_const, GSD.windows,
enzyme_const, GSD.poles,
//enzyme_dup, GSD.poles, GSD.d_poles,
enzyme_const, GSD.max_num_windows,
enzyme_const, GSD.max_num_poles
);
#endif
// For verification, and to prevent the compiler from optimizing
// all work out, we interrogate the returned macro_xs array
// to find its maximum value index, then increment the verification
// value by that index. In this implementation, we write to a global
// verification array that will get reduced after this kernel completes.
double max = -DBL_MAX;
int max_idx = 0;
for(int x = 0; x < 4; x++ )
{
if( macro_xs[x] > max )
{
max = macro_xs[x];
max_idx = x;
}
}
GSD.verification[i] = max_idx+1;
}
__attribute__((noinline))
__device__ void body( int i, double * __restrict__ macro_xs, int mat, double E, const Input& __restrict__ input, int * __restrict__ num_nucs, int * __restrict__ mats, int max_num_nucs, double * __restrict__ concs, int * __restrict__ n_windows, double * __restrict__ pseudo_K0Rs, Window * __restrict__ windows, Pole * __restrict__ poles, int max_num_windows, int max_num_poles ) {
double micro_xs[4];
int nuc = mats[mat * max_num_nucs + i];
if( input.doppler == 1 )
calculate_micro_xs_doppler( micro_xs, nuc, E, input, n_windows, pseudo_K0Rs, windows, poles, max_num_windows, max_num_poles);
else
calculate_micro_xs( micro_xs, nuc, E, input, n_windows, pseudo_K0Rs, windows, poles, max_num_windows, max_num_poles);
for( int j = 0; j < 4; j++ )
{
macro_xs[j] += micro_xs[j] * concs[mat * max_num_nucs + i];
}
}
__device__ void calculate_macro_xs( double * __restrict__ macro_xs, int mat, double E, Input input, int * __restrict__ num_nucs, int * __restrict__ mats, int max_num_nucs, double * __restrict__ concs, int * __restrict__ n_windows, double * __restrict__ pseudo_K0Rs, Window * __restrict__ windows, Pole * __restrict__ poles, int max_num_windows, int max_num_poles )
{
// zero out macro vector
for( int i = 0; i < 4; i++ )
macro_xs[i] = 0;
// for nuclide in mat
for( int i = 0; i < num_nucs[mat]; i++ )
{
body(i, macro_xs, mat, E, input, num_nucs, mats, max_num_nucs, concs, n_windows, pseudo_K0Rs, windows, poles, max_num_windows, max_num_poles);
/*
double micro_xs[4];
int nuc = mats[mat * max_num_nucs + i];
if( input.doppler == 1 )
calculate_micro_xs_doppler( micro_xs, nuc, E, input, n_windows, pseudo_K0Rs, windows, poles, max_num_windows, max_num_poles);
else
calculate_micro_xs( micro_xs, nuc, E, input, n_windows, pseudo_K0Rs, windows, poles, max_num_windows, max_num_poles);
for( int j = 0; j < 4; j++ )
{
macro_xs[j] += micro_xs[j] * concs[mat * max_num_nucs + i];
}
*/
// Debug
/*
printf("E = %.2lf, mat = %d, macro_xs[0] = %.2lf, macro_xs[1] = %.2lf, macro_xs[2] = %.2lf, macro_xs[3] = %.2lf\n",
E, mat, macro_xs[0], macro_xs[1], macro_xs[2], macro_xs[3] );
*/
}
// Debug
/*
printf("E = %.2lf, mat = %d, macro_xs[0] = %.2lf, macro_xs[1] = %.2lf, macro_xs[2] = %.2lf, macro_xs[3] = %.2lf\n",
E, mat, macro_xs[0], macro_xs[1], macro_xs[2], macro_xs[3] );
*/
}
// No Temperature dependence (i.e., 0K evaluation)
__attribute__((always_inline))
__device__ void calculate_micro_xs( double * micro_xs, int nuc, double E, Input input, int * n_windows, double * pseudo_K0RS, Window * windows, Pole * poles, int max_num_windows, int max_num_poles)
{
// MicroScopic XS's to Calculate
double sigT;
double sigA;
double sigF;
double sigE;
// Calculate Window Index
double spacing = 1.0 / n_windows[nuc];
int window = (int) ( E / spacing );
if( window == n_windows[nuc] )
window--;
// Calculate sigTfactors
RSComplex sigTfactors[4]; // Of length input.numL, which is always 4
calculate_sig_T(nuc, E, input, pseudo_K0RS, sigTfactors );
// Calculate contributions from window "background" (i.e., poles outside window, pre-calculated)
Window w = windows[nuc * max_num_windows + window];
sigT = E * w.T;
sigA = E * w.A;
sigF = E * w.F;
// Loop over Poles within window, add contributions
for( int i = w.start; i < w.end; i++ )
{
RSComplex PSIIKI;
RSComplex CDUM;
Pole pole = poles[nuc * max_num_poles + i];
RSComplex t1 = {0, 1};
RSComplex t2 = {sqrt(E), 0 };
PSIIKI = c_div( t1 , c_sub(pole.MP_EA,t2) );
RSComplex E_c = {E, 0};
CDUM = c_div(PSIIKI, E_c);
sigT += (c_mul(pole.MP_RT, c_mul(CDUM, sigTfactors[pole.l_value])) ).r;
sigA += (c_mul( pole.MP_RA, CDUM)).r;
sigF += (c_mul(pole.MP_RF, CDUM)).r;
}
sigE = sigT - sigA;
micro_xs[0] = sigT;
micro_xs[1] = sigA;
micro_xs[2] = sigF;
micro_xs[3] = sigE;
}
// Temperature Dependent Variation of Kernel
// (This involves using the Complex Faddeeva function to
// Doppler broaden the poles within the window)
__attribute__((always_inline))
__device__ void calculate_micro_xs_doppler( double * micro_xs, int nuc, double E, Input input, int * n_windows, double * pseudo_K0RS, Window * windows, Pole * poles, int max_num_windows, int max_num_poles )
{
// MicroScopic XS's to Calculate
double sigT;
double sigA;
double sigF;
double sigE;
// Calculate Window Index
double spacing = 1.0 / n_windows[nuc];
int window = (int) ( E / spacing );
if( window == n_windows[nuc] )
window--;
// Calculate sigTfactors
RSComplex sigTfactors[4]; // Of length input.numL, which is always 4
calculate_sig_T(nuc, E, input, pseudo_K0RS, sigTfactors );
// Calculate contributions from window "background" (i.e., poles outside window, pre-calculated)
Window w = windows[nuc * max_num_windows + window];
sigT = E * w.T;
sigA = E * w.A;
sigF = E * w.F;
double dopp = 0.5;
// Loop over Poles within window, add contributions
for( int i = w.start; i < w.end; i++ )
{
Pole pole = poles[nuc * max_num_poles + i];
// Prep Z
RSComplex E_c = {E, 0};
RSComplex dopp_c = {dopp, 0};
RSComplex Z = c_mul(c_sub(E_c, pole.MP_EA), dopp_c);
// Evaluate Fadeeva Function
RSComplex faddeeva = fast_nuclear_W( Z );
// Update W
sigT += (c_mul( pole.MP_RT, c_mul(faddeeva, sigTfactors[pole.l_value]) )).r;
sigA += (c_mul( pole.MP_RA , faddeeva)).r;
sigF += (c_mul( pole.MP_RF , faddeeva)).r;
}
sigE = sigT - sigA;
micro_xs[0] = sigT;
micro_xs[1] = sigA;
micro_xs[2] = sigF;
micro_xs[3] = sigE;
}
// picks a material based on a probabilistic distribution
__device__ int pick_mat( uint64_t * seed )
{
// I have a nice spreadsheet supporting these numbers. They are
// the fractions (by volume) of material in the core. Not a
// *perfect* approximation of where XS lookups are going to occur,
// but this will do a good job of biasing the system nonetheless.
double dist[12];
dist[0] = 0.140; // fuel
dist[1] = 0.052; // cladding
dist[2] = 0.275; // cold, borated water
dist[3] = 0.134; // hot, borated water
dist[4] = 0.154; // RPV
dist[5] = 0.064; // Lower, radial reflector
dist[6] = 0.066; // Upper reflector / top plate
dist[7] = 0.055; // bottom plate
dist[8] = 0.008; // bottom nozzle
dist[9] = 0.015; // top nozzle
dist[10] = 0.025; // top of fuel assemblies
dist[11] = 0.013; // bottom of fuel assemblies
double roll = LCG_random_double(seed);
// makes a pick based on the distro
for( int i = 0; i < 12; i++ )
{
double running = 0;
for( int j = i; j > 0; j-- )
running += dist[j];
if( roll < running )
return i;
}
return 0;
}
__device__ void calculate_sig_T( int nuc, double E, Input input, double * pseudo_K0RS, RSComplex * sigTfactors )
{
double phi;
#pragma unroll
for( int i = 0; i < 4; i++ )
{
phi = pseudo_K0RS[nuc * input.numL + i] * sqrt(E);
if( i == 1 )
phi -= - atan( phi );
else if( i == 2 )
phi -= atan( 3.0 * phi / (3.0 - phi*phi));
else if( i == 3 )
phi -= atan(phi*(15.0-phi*phi)/(15.0-6.0*phi*phi));
phi *= 2.0;
sigTfactors[i].r = cos(phi);
sigTfactors[i].i = -sin(phi);
}
}
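// Added note (not in the original source): because sigTfactors[i].r = cos(phi)
// and sigTfactors[i].i = -sin(phi), each entry is the complex exponential
// e^(-i*phi) for channel i; the pole loops above select a factor through
// pole.l_value.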
// This function uses a combination of the Abrarov Approximation
// and the QUICK_W three term asymptotic expansion.
// Only expected to use Abrarov ~0.5% of the time.
__device__ RSComplex fast_nuclear_W( RSComplex Z )
{
// Abrarov
if( c_abs(Z) < 6.0 )
{
// Precomputed parts for speeding things up
// (N = 10, Tm = 12.0)
RSComplex prefactor = {0, 8.124330e+01};
double an[10] = {
2.758402e-01,
2.245740e-01,
1.594149e-01,
9.866577e-02,
5.324414e-02,
2.505215e-02,
1.027747e-02,
3.676164e-03,
1.146494e-03,
3.117570e-04
};
double neg_1n[10] = {
-1.0,
1.0,
-1.0,
1.0,
-1.0,
1.0,
-1.0,
1.0,
-1.0,
1.0
};
double denominator_left[10] = {
9.869604e+00,
3.947842e+01,
8.882644e+01,
1.579137e+02,
2.467401e+02,
3.553058e+02,
4.836106e+02,
6.316547e+02,
7.994380e+02,
9.869604e+02
};
RSComplex t1 = {0, 12};
RSComplex t2 = {12, 0};
RSComplex i = {0,1};
RSComplex one = {1, 0};
RSComplex W = c_div(c_mul(i, ( c_sub(one, fast_cexp(c_mul(t1, Z))) )) , c_mul(t2, Z));
RSComplex sum = {0,0};
for( int n = 0; n < 10; n++ )
{
RSComplex t3 = {neg_1n[n], 0};
RSComplex top = c_sub(c_mul(t3, fast_cexp(c_mul(t1, Z))), one);
RSComplex t4 = {denominator_left[n], 0};
RSComplex t5 = {144, 0};
RSComplex bot = c_sub(t4, c_mul(t5,c_mul(Z,Z)));
RSComplex t6 = {an[n], 0};
sum = c_add(sum, c_mul(t6, c_div(top,bot)));
}
W = c_add(W, c_mul(prefactor, c_mul(Z, sum)));
return W;
}
else
{
// QUICK_2 3 Term Asymptotic Expansion (Accurate to O(1e-6)).
// Pre-computed parameters
RSComplex a = {0.512424224754768462984202823134979415014943561548661637413182,0};
RSComplex b = {0.275255128608410950901357962647054304017026259671664935783653, 0};
RSComplex c = {0.051765358792987823963876628425793170829107067780337219430904, 0};
RSComplex d = {2.724744871391589049098642037352945695982973740328335064216346, 0};
RSComplex i = {0,1};
RSComplex Z2 = c_mul(Z, Z);
// Three Term Asymptotic Expansion
RSComplex W = c_mul(c_mul(Z,i), (c_add(c_div(a,(c_sub(Z2, b))) , c_div(c,(c_sub(Z2, d))))));
return W;
}
}
__host__ __device__ double LCG_random_double(uint64_t * seed)
{
const uint64_t m = 9223372036854775808ULL; // 2^63
const uint64_t a = 2806196910506780709ULL;
const uint64_t c = 1ULL;
*seed = (a * (*seed) + c) % m;
return (double) (*seed) / (double) m;
}
__host__ __device__ uint64_t LCG_random_int(uint64_t * seed)
{
const uint64_t m = 9223372036854775808ULL; // 2^63
const uint64_t a = 2806196910506780709ULL;
const uint64_t c = 1ULL;
*seed = (a * (*seed) + c) % m;
return *seed;
}
__device__ uint64_t fast_forward_LCG(uint64_t seed, uint64_t n)
{
const uint64_t m = 9223372036854775808ULL; // 2^63
uint64_t a = 2806196910506780709ULL;
uint64_t c = 1ULL;
n = n % m;
uint64_t a_new = 1;
uint64_t c_new = 0;
while(n > 0)
{
if(n & 1)
{
a_new *= a;
c_new = c_new * a + c;
}
c *= (a + 1);
a *= a;
n >>= 1;
}
return (a_new * seed + c_new) % m;
}
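// Added illustrative sketch (not part of the original source): the loop above is
// binary exponentiation of the affine map seed -> a*seed + c, so skipping ahead
// by n must agree with advancing the generator one step at a time. The helper
// below is hypothetical, never called by the benchmark, and exists only to make
// that property explicit.
__device__ inline bool LCG_skip_ahead_matches_sequential(uint64_t seed, uint64_t n)
{
	uint64_t s = seed;
	for( uint64_t k = 0; k < n; k++ )
		LCG_random_int(&s); // one sequential step per iteration
	return s == fast_forward_LCG(seed, n);
}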
// Complex arithmetic functions
__device__ RSComplex c_add( RSComplex A, RSComplex B)
{
RSComplex C;
C.r = A.r + B.r;
C.i = A.i + B.i;
return C;
}
__device__ RSComplex c_sub( RSComplex A, RSComplex B)
{
RSComplex C;
C.r = A.r - B.r;
C.i = A.i - B.i;
return C;
}
__host__ __device__ RSComplex c_mul( RSComplex A, RSComplex B)
{
double a = A.r;
double b = A.i;
double c = B.r;
double d = B.i;
RSComplex C;
C.r = (a*c) - (b*d);
C.i = (a*d) + (b*c);
return C;
}
__device__ RSComplex c_div( RSComplex A, RSComplex B)
{
double a = A.r;
double b = A.i;
double c = B.r;
double d = B.i;
RSComplex C;
double denom = c*c + d*d;
C.r = ( (a*c) + (b*d) ) / denom;
C.i = ( (b*c) - (a*d) ) / denom;
return C;
}
__device__ double c_abs( RSComplex A)
{
return sqrt(A.r*A.r + A.i * A.i);
}
// Fast (but inaccurate) exponential function
// Written By "ACMer":
// https://codingforspeed.com/using-faster-exponential-approximation/
// We use our own to avoid small differences in compiler specific
// exp() intrinsic implementations that make it difficult to verify
// if the code is working correctly or not.
__device__ double fast_exp(double x)
{
x = 1.0 + x * 0.000244140625;
x *= x; x *= x; x *= x; x *= x;
x *= x; x *= x; x *= x; x *= x;
x *= x; x *= x; x *= x; x *= x;
return x;
}
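// Added note (not in the original source): the constant 0.000244140625 is 1/4096
// and the twelve squarings raise the result to the 4096th power, so fast_exp(x)
// evaluates (1 + x/4096)^4096, an approximation of e^x. For example,
// fast_exp(1.0) is roughly 2.7180 versus e = 2.71828..., and the error grows
// with |x|.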
// Implementation based on:
// z = x + iy
// cexp(z) = e^x * (cos(y) + i * sin(y))
__device__ RSComplex fast_cexp( RSComplex z )
{
double x = z.r;
double y = z.i;
// For consistency across architectures, we
	// will use our own exponential implementation
//double t1 = exp(x);
double t1 = fast_exp(x);
double t2 = cos(y);
double t3 = sin(y);
RSComplex t4 = {t2, t3};
RSComplex t5 = {t1, 0};
RSComplex result = c_mul(t5, (t4));
return result;
}
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
// OPTIMIZED VARIANT FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////
// This section contains a number of optimized variants of some of the above
// functions, which each deploy a different combination of optimizations strategies
// specific to GPU. By default, RSBench will not run any of these variants. They
// must be specifically selected using the "-k <optimized variant ID>" command
// line argument.
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
// Optimization 6 -- Kernel Splitting + All Material Lookups + Full Sort
// + Energy Sort
////////////////////////////////////////////////////////////////////////////////////
// This optimization builds on optimization 4, adding in a second sort by energy.
// It is extremely fast, as now most of the threads within a warp will be hitting
// the same indices in the lookup grids. This greatly reduces thread divergence and
// greatly improves cache efficiency and re-use.
//
// However, it is unlikely that this exact optimization would be possible in a real
// application like OpenMC. One major difference is that particle objects are quite
// large, often having 50+ variable fields, such that sorting them in memory becomes
// rather expensive. Instead, the best possible option would probably be to create
// intermediate indexing (per Hamilton et al. 2019) and run the kernels indirectly.
////////////////////////////////////////////////////////////////////////////////////
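////////////////////////////////////////////////////////////////////////////////////
// Added note (not part of the original source): the "intermediate indexing"
// alternative mentioned above would sort a small index array instead of the
// particle records themselves. A minimal sketch, with hypothetical names:
//
//     // keys = per-particle sort key (material or energy), idx = 0..n-1
//     thrust::sort_by_key(thrust::device, keys, keys + n, idx);
//     // lookup kernels then access particle i as particles[idx[i]] (a gather),
//     // leaving the large particle structs unmoved in memory.
//
// This trades the cost of moving large records for one extra indirection
// inside the lookup kernels.
////////////////////////////////////////////////////////////////////////////////////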
__global__ void sampling_kernel(Input in, SimulationData GSD )
{
// The lookup ID.
const int i = blockIdx.x *blockDim.x + threadIdx.x;
if( i >= in.lookups )
return;
// Set the initial seed value
uint64_t seed = STARTING_SEED;
// Forward seed to lookup index (we need 2 samples per lookup)
seed = fast_forward_LCG(seed, 2*i);
// Randomly pick an energy and material for the particle
double p_energy = LCG_random_double(&seed);
int mat = pick_mat(&seed);
// Store sample data in state array
GSD.p_energy_samples[i] = p_energy;
GSD.mat_samples[i] = mat;
}
__global__ void xs_lookup_kernel_optimization_1(Input in, SimulationData GSD, int m, int n_lookups, int offset )
{
// The lookup ID. Used to set the seed, and to store the verification value
int i = blockIdx.x *blockDim.x + threadIdx.x;
if( i >= n_lookups )
return;
i += offset;
// Check that our material type matches the kernel material
int mat = GSD.mat_samples[i];
if( mat != m )
return;
double macro_xs[4] = {0};
calculate_macro_xs( macro_xs, mat, GSD.p_energy_samples[i], in, GSD.num_nucs, GSD.mats, GSD.max_num_nucs, GSD.concs, GSD.n_windows, GSD.pseudo_K0RS, GSD.windows, GSD.poles, GSD.max_num_windows, GSD.max_num_poles );
// For verification, and to prevent the compiler from optimizing
// all work out, we interrogate the returned macro_xs_vector array
// to find its maximum value index, then increment the verification
// value by that index. In this implementation, we write to a global
	// verification array that will get reduced after this kernel completes.
double max = -DBL_MAX;
int max_idx = 0;
for(int x = 0; x < 4; x++ )
{
if( macro_xs[x] > max )
{
max = macro_xs[x];
max_idx = x;
}
}
GSD.verification[i] = max_idx+1;
}
void run_event_based_simulation_optimization_1(Input in, SimulationData GSD, unsigned long * vhash_result)
{
const char * optimization_name = "Optimization 1 - Material & Energy Sorts + Material-specific Kernels";
printf("Simulation Kernel:\"%s\"\n", optimization_name);
////////////////////////////////////////////////////////////////////////////////
// Allocate Additional Data Structures Needed by Optimized Kernel
////////////////////////////////////////////////////////////////////////////////
printf("Allocating additional device data required by kernel...\n");
size_t sz;
size_t total_sz = 0;
sz = in.lookups * sizeof(double);
gpuErrchk( cudaMalloc((void **) &GSD.p_energy_samples, sz) );
total_sz += sz;
GSD.length_p_energy_samples = in.lookups;
sz = in.lookups * sizeof(int);
gpuErrchk( cudaMalloc((void **) &GSD.mat_samples, sz) );
total_sz += sz;
GSD.length_mat_samples = in.lookups;
printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0);
////////////////////////////////////////////////////////////////////////////////
// Configure & Launch Simulation Kernel
////////////////////////////////////////////////////////////////////////////////
printf("Beginning optimized simulation...\n");
int nthreads = 32;
int nblocks = ceil( (double) in.lookups / 32.0);
sampling_kernel<<<nblocks, nthreads>>>( in, GSD );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
// Count the number of fuel material lookups that need to be performed (fuel id = 0)
int n_lookups_per_material[12];
for( int m = 0; m < 12; m++ )
n_lookups_per_material[m] = thrust::count(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, m);
// Sort by material first
thrust::sort_by_key(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, GSD.p_energy_samples);
// Now, sort each material by energy
int offset = 0;
for( int m = 0; m < 12; m++ )
{
thrust::sort_by_key(thrust::device, GSD.p_energy_samples + offset, GSD.p_energy_samples + offset + n_lookups_per_material[m], GSD.mat_samples + offset);
offset += n_lookups_per_material[m];
}
// Launch all material kernels individually
offset = 0;
for( int m = 0; m < 12; m++ )
{
nthreads = 32;
nblocks = ceil((double) n_lookups_per_material[m] / (double) nthreads);
xs_lookup_kernel_optimization_1<<<nblocks, nthreads>>>( in, GSD, m, n_lookups_per_material[m], offset );
offset += n_lookups_per_material[m];
}
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
////////////////////////////////////////////////////////////////////////////////
// Reduce Verification Results
////////////////////////////////////////////////////////////////////////////////
printf("Reducing verification results...\n");
unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
*vhash_result = verification_scalar;
}
|
np_boolean_mask_assign.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file np_boolean_mask_assign.cu
* \brief GPU implementation of Boolean Mask Assign
*/
#include <hipcub/hipcub.hpp>
#include "../../common/utils.h"
#include "../contrib/boolean_mask-inl.h"
namespace mxnet {
namespace op {
template<bool scalar>
struct BooleanAssignGPUKernel {
private:
static size_t __device__ bin_search(const size_t* idx,
const size_t idx_size,
const size_t i) {
size_t left = 0, right = idx_size, mid = (left + right) / 2;
while (left != right) {
if (idx[mid] == i + 1) {
if (idx[mid - 1] == i) {
mid -= 1;
break;
} else if (idx[mid - 1] == i + 1) {
right = mid;
mid = (left + right) / 2;
}
} else if (idx[mid] == i) {
if (idx[mid + 1] == i + 1) {
break;
} else {
left = mid;
mid = (left + right + 1) / 2;
}
} else if (idx[mid] < i + 1) {
left = mid;
mid = (left + right + 1) / 2;
} else if (idx[mid] > i + 1) {
right = mid;
mid = (left + right) / 2;
}
}
return mid;
}
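  // Added worked example for bin_search above (not in the original source): for
  // a mask [1, 0, 1, 1, 0] the padded exclusive prefix sum idx is
  // [0, 1, 1, 2, 3, 3]. For the m-th valid element, bin_search returns the
  // original-axis position mid with idx[mid] == m and idx[mid + 1] == m + 1;
  // e.g. m = 1 yields mid = 2, the second set bit of the mask.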
public:
template<typename DType>
static void __device__ Map(int i,
DType* data,
const size_t* idx,
const size_t idx_size,
const size_t leading,
const size_t middle,
const size_t valid_num,
const size_t trailing,
const DType val) {
// binary search for the turning point
size_t m = i / trailing % valid_num;
size_t l = i / trailing / valid_num;
size_t mid = bin_search(idx, idx_size, m);
// final answer is in mid
// i = l * valid_num * trailing + m * trailing + t
// dst = l * middle * trailing + mid * trailing + t
data[i + (l * (middle - valid_num) + (mid - m)) * trailing] = val;
}
template<typename DType>
static void __device__ Map(int i,
DType* data,
const size_t* idx,
const size_t idx_size,
const size_t leading,
const size_t middle,
const size_t valid_num,
const size_t trailing,
DType* tensor,
const bool broadcast = false) {
// binary search for the turning point
size_t m = i / trailing % valid_num;
size_t l = i / trailing / valid_num;
size_t mid = bin_search(idx, idx_size, m);
size_t dst = i + (l * (middle - valid_num) + (mid - m)) * trailing;
// final answer is in mid
if (scalar) {
data[dst] = tensor[0];
} else {
data[dst] = broadcast ? tensor[l * trailing + i % trailing] : tensor[i];
}
}
};
struct NonZeroWithCast {
template<typename OType, typename IType>
static void __device__ Map(int i, OType* out, const IType* in) {
out[i] = (in[i]) ? OType(1) : OType(0);
}
};
// Computes the prefix_sum vector and returns a pointer to it
template<typename DType>
size_t* GetValidNumGPU(const OpContext &ctx, const DType *idx, const size_t idx_size) {
using namespace mshadow;
using namespace mxnet_op;
using namespace mshadow_op;
size_t* prefix_sum = nullptr;
void* d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
Stream<gpu>* s = ctx.get_stream<gpu>();
hipStream_t stream = Stream<gpu>::GetStream(s);
// Calculate total temporary memory size
hipcub::DeviceScan::ExclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
idx_size + 1,
stream);
size_t buffer_size = (idx_size + 1) * sizeof(size_t);
temp_storage_bytes += buffer_size;
// Allocate memory on GPU and allocate pointer
Tensor<gpu, 1, char> workspace =
ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(temp_storage_bytes), s);
prefix_sum = reinterpret_cast<size_t*>(workspace.dptr_);
d_temp_storage = workspace.dptr_ + buffer_size;
// Robustly set the bool values in mask
// TODO(haojin2): Get a more efficient way to preset the buffer
Kernel<set_zero, gpu>::Launch(s, idx_size + 1, prefix_sum);
if (!std::is_same<DType, bool>::value) {
Kernel<NonZeroWithCast, gpu>::Launch(s, idx_size, prefix_sum, idx);
} else {
Kernel<identity_with_cast, gpu>::Launch(s, idx_size, prefix_sum, idx);
}
// Calculate prefix sum
hipcub::DeviceScan::ExclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
idx_size + 1,
stream);
return prefix_sum;
}
void NumpyBooleanAssignForwardGPU(const nnvm::NodeAttrs& attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
using namespace mxnet_op;
CHECK(inputs.size() == 2U || inputs.size() == 3U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
CHECK_EQ(req[0], kWriteInplace)
<< "Only WriteInplace is supported for npi_boolean_assign";
Stream<gpu>* s = ctx.get_stream<gpu>();
const TBlob& data = inputs[0];
const TShape& dshape = data.shape_;
const TBlob& mask = inputs[1];
const TShape& mshape = mask.shape_;
const int start_axis = std::stoi(common::attr_value_string(attrs, "start_axis", "0"));
// Get valid_num
size_t mask_size = mask.shape_.Size();
size_t valid_num = 0;
size_t* prefix_sum = nullptr;
if (mask_size != 0) {
MSHADOW_TYPE_SWITCH_WITH_BOOL(mask.type_flag_, MType, {
prefix_sum = GetValidNumGPU<MType>(ctx, mask.dptr<MType>(), mask_size);
});
hipStream_t stream = mshadow::Stream<gpu>::GetStream(s);
CUDA_CALL(hipMemcpyAsync(&valid_num, &prefix_sum[mask_size], sizeof(size_t),
hipMemcpyDeviceToHost, stream));
CUDA_CALL(hipStreamSynchronize(stream));
}
// If there's no True in mask, return directly
if (valid_num == 0) return;
const TShape& vshape = inputs[2].shape_;
if (inputs.size() == 3U) {
// tensor case
if (inputs[2].shape_.Size() != 1) {
auto vndim = vshape.ndim();
auto dndim = dshape.ndim();
auto mndim = mshape.ndim();
CHECK(vndim <= (dndim - mndim + 1));
if ((vndim == (dndim - mndim + 1)) && (vshape[start_axis] != 1)) {
// tensor case, check tensor size equal to or broadcastable with valid_num
CHECK_EQ(static_cast<size_t>(valid_num), vshape[start_axis])
<< "boolean array indexing assignment cannot assign " << vshape
<< " input values to the " << valid_num << " output values where the mask is true"
<< std::endl;
}
}
}
size_t leading = 1U;
size_t middle = mask_size;
size_t trailing = 1U;
for (int i = 0; i < dshape.ndim(); ++i) {
if (i < start_axis) {
leading *= dshape[i];
}
if (i >= start_axis + mshape.ndim()) {
trailing *= dshape[i];
}
}
if (inputs.size() == 3U) {
if (inputs[2].shape_.Size() == 1) {
MSHADOW_TYPE_SWITCH_WITH_BOOL(data.type_flag_, DType, {
Kernel<BooleanAssignGPUKernel<true>, gpu>::Launch(
s, leading * valid_num * trailing, data.dptr<DType>(), prefix_sum, mask_size + 1,
leading, middle, valid_num, trailing, inputs[2].dptr<DType>());
});
} else {
bool need_broadcast = (vshape.ndim() == (dshape.ndim() - mshape.ndim() + 1)) ?
(vshape[start_axis] == 1) :
true;
MSHADOW_TYPE_SWITCH_WITH_BOOL(data.type_flag_, DType, {
Kernel<BooleanAssignGPUKernel<false>, gpu>::Launch(
s, leading * valid_num * trailing, data.dptr<DType>(), prefix_sum, mask_size + 1,
leading, middle, valid_num, trailing, inputs[2].dptr<DType>(), need_broadcast);
});
}
} else {
CHECK(attrs.dict.find("value") != attrs.dict.end()) << "value is not provided";
double value = std::stod(attrs.dict.at("value"));
MSHADOW_TYPE_SWITCH_WITH_BOOL(data.type_flag_, DType, {
Kernel<BooleanAssignGPUKernel<true>, gpu>::Launch(
s, leading * valid_num * trailing, data.dptr<DType>(), prefix_sum, mask_size + 1,
leading, middle, valid_num, trailing, static_cast<DType>(value));
});
}
}
NNVM_REGISTER_OP(_npi_boolean_mask_assign_scalar)
.set_attr<FCompute>("FCompute<gpu>", NumpyBooleanAssignForwardGPU);
NNVM_REGISTER_OP(_npi_boolean_mask_assign_tensor)
.set_attr<FCompute>("FCompute<gpu>", NumpyBooleanAssignForwardGPU);
} // namespace op
} // namespace mxnet
| np_boolean_mask_assign.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file np_boolean_mask_assign.cu
* \brief GPU implementation of Boolean Mask Assign
*/
#include <cub/cub.cuh>
#include "../../common/utils.h"
#include "../contrib/boolean_mask-inl.h"
namespace mxnet {
namespace op {
template<bool scalar>
struct BooleanAssignGPUKernel {
private:
static size_t __device__ bin_search(const size_t* idx,
const size_t idx_size,
const size_t i) {
size_t left = 0, right = idx_size, mid = (left + right) / 2;
while (left != right) {
if (idx[mid] == i + 1) {
if (idx[mid - 1] == i) {
mid -= 1;
break;
} else if (idx[mid - 1] == i + 1) {
right = mid;
mid = (left + right) / 2;
}
} else if (idx[mid] == i) {
if (idx[mid + 1] == i + 1) {
break;
} else {
left = mid;
mid = (left + right + 1) / 2;
}
} else if (idx[mid] < i + 1) {
left = mid;
mid = (left + right + 1) / 2;
} else if (idx[mid] > i + 1) {
right = mid;
mid = (left + right) / 2;
}
}
return mid;
}
public:
template<typename DType>
static void __device__ Map(int i,
DType* data,
const size_t* idx,
const size_t idx_size,
const size_t leading,
const size_t middle,
const size_t valid_num,
const size_t trailing,
const DType val) {
// binary search for the turning point
size_t m = i / trailing % valid_num;
size_t l = i / trailing / valid_num;
size_t mid = bin_search(idx, idx_size, m);
// final answer is in mid
// i = l * valid_num * trailing + m * trailing + t
// dst = l * middle * trailing + mid * trailing + t
data[i + (l * (middle - valid_num) + (mid - m)) * trailing] = val;
}
template<typename DType>
static void __device__ Map(int i,
DType* data,
const size_t* idx,
const size_t idx_size,
const size_t leading,
const size_t middle,
const size_t valid_num,
const size_t trailing,
DType* tensor,
const bool broadcast = false) {
// binary search for the turning point
size_t m = i / trailing % valid_num;
size_t l = i / trailing / valid_num;
size_t mid = bin_search(idx, idx_size, m);
size_t dst = i + (l * (middle - valid_num) + (mid - m)) * trailing;
// final answer is in mid
if (scalar) {
data[dst] = tensor[0];
} else {
data[dst] = broadcast ? tensor[l * trailing + i % trailing] : tensor[i];
}
}
};
struct NonZeroWithCast {
template<typename OType, typename IType>
static void __device__ Map(int i, OType* out, const IType* in) {
out[i] = (in[i]) ? OType(1) : OType(0);
}
};
// Computes the prefix_sum vector and returns a pointer to it
template<typename DType>
size_t* GetValidNumGPU(const OpContext &ctx, const DType *idx, const size_t idx_size) {
using namespace mshadow;
using namespace mxnet_op;
using namespace mshadow_op;
size_t* prefix_sum = nullptr;
void* d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
Stream<gpu>* s = ctx.get_stream<gpu>();
cudaStream_t stream = Stream<gpu>::GetStream(s);
// Calculate total temporary memory size
cub::DeviceScan::ExclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
idx_size + 1,
stream);
size_t buffer_size = (idx_size + 1) * sizeof(size_t);
temp_storage_bytes += buffer_size;
// Allocate memory on GPU and allocate pointer
Tensor<gpu, 1, char> workspace =
ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(temp_storage_bytes), s);
prefix_sum = reinterpret_cast<size_t*>(workspace.dptr_);
d_temp_storage = workspace.dptr_ + buffer_size;
// Robustly set the bool values in mask
// TODO(haojin2): Get a more efficient way to preset the buffer
Kernel<set_zero, gpu>::Launch(s, idx_size + 1, prefix_sum);
if (!std::is_same<DType, bool>::value) {
Kernel<NonZeroWithCast, gpu>::Launch(s, idx_size, prefix_sum, idx);
} else {
Kernel<identity_with_cast, gpu>::Launch(s, idx_size, prefix_sum, idx);
}
// Calculate prefix sum
cub::DeviceScan::ExclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
idx_size + 1,
stream);
return prefix_sum;
}
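// Added note (not part of the original source): cub::DeviceScan::ExclusiveSum
// follows CUB's two-phase protocol. The first call above passes
// d_temp_storage == nullptr, so CUB only reports the scratch size in
// temp_storage_bytes; the scratch is then carved out of the op workspace and
// the second call performs the actual scan. A minimal standalone sketch:
//
//   void* tmp = nullptr; size_t bytes = 0;
//   cub::DeviceScan::ExclusiveSum(tmp, bytes, d_in, d_out, n, stream); // size query
//   cudaMalloc(&tmp, bytes);
//   cub::DeviceScan::ExclusiveSum(tmp, bytes, d_in, d_out, n, stream); // real scan
//   cudaFree(tmp);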
void NumpyBooleanAssignForwardGPU(const nnvm::NodeAttrs& attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
using namespace mxnet_op;
CHECK(inputs.size() == 2U || inputs.size() == 3U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
CHECK_EQ(req[0], kWriteInplace)
<< "Only WriteInplace is supported for npi_boolean_assign";
Stream<gpu>* s = ctx.get_stream<gpu>();
const TBlob& data = inputs[0];
const TShape& dshape = data.shape_;
const TBlob& mask = inputs[1];
const TShape& mshape = mask.shape_;
const int start_axis = std::stoi(common::attr_value_string(attrs, "start_axis", "0"));
// Get valid_num
size_t mask_size = mask.shape_.Size();
size_t valid_num = 0;
size_t* prefix_sum = nullptr;
if (mask_size != 0) {
MSHADOW_TYPE_SWITCH_WITH_BOOL(mask.type_flag_, MType, {
prefix_sum = GetValidNumGPU<MType>(ctx, mask.dptr<MType>(), mask_size);
});
cudaStream_t stream = mshadow::Stream<gpu>::GetStream(s);
CUDA_CALL(cudaMemcpyAsync(&valid_num, &prefix_sum[mask_size], sizeof(size_t),
cudaMemcpyDeviceToHost, stream));
CUDA_CALL(cudaStreamSynchronize(stream));
}
// If there's no True in mask, return directly
if (valid_num == 0) return;
const TShape& vshape = inputs[2].shape_;
if (inputs.size() == 3U) {
// tensor case
if (inputs[2].shape_.Size() != 1) {
auto vndim = vshape.ndim();
auto dndim = dshape.ndim();
auto mndim = mshape.ndim();
CHECK(vndim <= (dndim - mndim + 1));
if ((vndim == (dndim - mndim + 1)) && (vshape[start_axis] != 1)) {
// tensor case, check tensor size equal to or broadcastable with valid_num
CHECK_EQ(static_cast<size_t>(valid_num), vshape[start_axis])
<< "boolean array indexing assignment cannot assign " << vshape
<< " input values to the " << valid_num << " output values where the mask is true"
<< std::endl;
}
}
}
size_t leading = 1U;
size_t middle = mask_size;
size_t trailing = 1U;
for (int i = 0; i < dshape.ndim(); ++i) {
if (i < start_axis) {
leading *= dshape[i];
}
if (i >= start_axis + mshape.ndim()) {
trailing *= dshape[i];
}
}
if (inputs.size() == 3U) {
if (inputs[2].shape_.Size() == 1) {
MSHADOW_TYPE_SWITCH_WITH_BOOL(data.type_flag_, DType, {
Kernel<BooleanAssignGPUKernel<true>, gpu>::Launch(
s, leading * valid_num * trailing, data.dptr<DType>(), prefix_sum, mask_size + 1,
leading, middle, valid_num, trailing, inputs[2].dptr<DType>());
});
} else {
bool need_broadcast = (vshape.ndim() == (dshape.ndim() - mshape.ndim() + 1)) ?
(vshape[start_axis] == 1) :
true;
MSHADOW_TYPE_SWITCH_WITH_BOOL(data.type_flag_, DType, {
Kernel<BooleanAssignGPUKernel<false>, gpu>::Launch(
s, leading * valid_num * trailing, data.dptr<DType>(), prefix_sum, mask_size + 1,
leading, middle, valid_num, trailing, inputs[2].dptr<DType>(), need_broadcast);
});
}
} else {
CHECK(attrs.dict.find("value") != attrs.dict.end()) << "value is not provided";
double value = std::stod(attrs.dict.at("value"));
MSHADOW_TYPE_SWITCH_WITH_BOOL(data.type_flag_, DType, {
Kernel<BooleanAssignGPUKernel<true>, gpu>::Launch(
s, leading * valid_num * trailing, data.dptr<DType>(), prefix_sum, mask_size + 1,
leading, middle, valid_num, trailing, static_cast<DType>(value));
});
}
}
NNVM_REGISTER_OP(_npi_boolean_mask_assign_scalar)
.set_attr<FCompute>("FCompute<gpu>", NumpyBooleanAssignForwardGPU);
NNVM_REGISTER_OP(_npi_boolean_mask_assign_tensor)
.set_attr<FCompute>("FCompute<gpu>", NumpyBooleanAssignForwardGPU);
} // namespace op
} // namespace mxnet
|
c233a4d9aa07f77082a40a5be15c26fb412e1979.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hip/hip_cooperative_groups.h>
#include <math.h>
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include "block_reduce.h"
#include "kernels.h"
namespace cg = cooperative_groups;
const float EPSILON = 1e-8f;
/**
@brief: softmax_kernel
Softmax forward kernel for
enc-self-attn, dec-self-attn, encdec-attn
@thread
gridDim.x = dynamic
gridDim.y = batch_size
gridDim.z = nhead
blockDim.x = from_len
@param
inp: [batch_size, nhead, from_len, to_len], softmax input.
attn_mask: [batch_size, to_len], padding tokens are -inf,
non padding tokens are 0.
attn_mask!=nullptr for enc-self-attn and enc-dec-attn
attn_mask=nullptr and mask_future=true for dec-self-attn training
attn_mask=nullptr and mask_future=false for dec-self-attn infer
*/
template <typename T, int block_dim, int ele_per_thread>
__global__ void ker_attn_softmax(T *inp, const T *attn_mask, int from_len,
int to_len, bool mask_future) {
int batch_id = blockIdx.y;
int head_id = blockIdx.z;
const int nhead = gridDim.z;
const int token_per_reduce = 1;
typedef cub::BlockLoad<T, block_dim, ele_per_thread,
cub::BLOCK_LOAD_VECTORIZE>
BlockLoad;
__shared__ typename BlockLoad::TempStorage ts_load;
typedef cub::BlockStore<T, block_dim, ele_per_thread,
cub::BLOCK_STORE_VECTORIZE>
BlockStore;
__shared__ typename BlockStore::TempStorage ts_store;
T mval[ele_per_thread];
if (attn_mask) {
attn_mask += batch_id * to_len;
BlockLoad(ts_load).Load(attn_mask, mval, to_len, REDUCE_FLOAT_INF_NEG);
}
inp += flat_3dim(batch_id, head_id, 0, nhead, from_len * to_len);
for (int token_id = blockIdx.x * token_per_reduce; token_id < from_len;
token_id += gridDim.x * token_per_reduce) {
T inp_val[token_per_reduce][ele_per_thread];
for (int i = 0; i < token_per_reduce && (token_id + i) < from_len; i++) {
BlockLoad(ts_load).Load(inp + (token_id + i) * to_len, inp_val[i], to_len,
REDUCE_FLOAT_INF_NEG);
}
/* step 1. compute max */
// thread local max
float val[token_per_reduce][ele_per_thread];
float l_max[token_per_reduce];
for (int i = 0; i < token_per_reduce; i++) {
l_max[i] = REDUCE_FLOAT_INF_NEG;
for (int j = 0; j < ele_per_thread; j++) {
if (attn_mask) {
val[i][j] = (float)inp_val[i][j] + (float)mval[j];
} else {
if (mask_future && ele_per_thread * threadIdx.x + j > token_id + i) {
val[i][j] = REDUCE_FLOAT_INF_NEG;
} else {
val[i][j] = (float)inp_val[i][j];
}
}
l_max[i] = fmaxf(l_max[i], val[i][j]);
}
}
// block reduce max
blockReduce<ReduceType::kMax, token_per_reduce>(l_max);
// write shared
__shared__ float s_max[token_per_reduce];
if (threadIdx.x == 0) {
for (int i = 0; i < token_per_reduce; i++) {
s_max[i] = l_max[i];
}
}
__syncthreads();
/* step 2. compute sum */
// thread local sum
float l_sum[token_per_reduce];
for (int i = 0; i < token_per_reduce; i++) {
l_sum[i] = 0.f;
for (int j = 0; j < ele_per_thread; j++) {
val[i][j] = __expf(val[i][j] - s_max[i]);
l_sum[i] += val[i][j];
}
}
// block reduce sum
blockReduce<ReduceType::kSum, token_per_reduce>(l_sum);
// write shared
__shared__ float s_sum[token_per_reduce];
if (threadIdx.x == 0) {
for (int i = 0; i < token_per_reduce; i++) {
s_sum[i] = __fdividef(1.0f, l_sum[i] + EPSILON);
}
}
__syncthreads();
/* step 3. compute final result */
for (int i = 0; i < token_per_reduce && (token_id + i) < from_len; i++) {
for (int j = 0; j < ele_per_thread; j++) {
inp_val[i][j] = (T)(val[i][j] * s_sum[i]);
}
BlockStore(ts_store).Store(inp + (token_id + i) * to_len, inp_val[i],
to_len);
}
} // blockIdx.x
}
template <typename T, int block_dim, int ele_per_thread>
__global__ void ker_attn_softmax_lt32(T *inp, const T *attn_mask, int from_len,
int to_len, bool mask_future) {
int batch_id = blockIdx.y;
int head_id = blockIdx.z;
const int nhead = gridDim.z;
const int token_per_reduce = 1;
typedef cub::BlockLoad<T, block_dim, ele_per_thread,
cub::BLOCK_LOAD_VECTORIZE>
BlockLoad;
__shared__ typename BlockLoad::TempStorage ts_load;
typedef cub::BlockStore<T, block_dim, ele_per_thread,
cub::BLOCK_STORE_VECTORIZE>
BlockStore;
__shared__ typename BlockStore::TempStorage ts_store;
T mval[ele_per_thread];
if (attn_mask) {
attn_mask += batch_id * to_len;
BlockLoad(ts_load).Load(attn_mask, mval, to_len, REDUCE_FLOAT_INF_NEG);
}
inp += flat_3dim(batch_id, head_id, 0, nhead, from_len * to_len);
for (int token_id = blockIdx.x * token_per_reduce; token_id < from_len;
token_id += gridDim.x * token_per_reduce) {
T inp_val[token_per_reduce][ele_per_thread];
for (int i = 0; i < token_per_reduce && (token_id + i) < from_len; i++) {
BlockLoad(ts_load).Load(inp + (token_id + i) * to_len, inp_val[i], to_len,
REDUCE_FLOAT_INF_NEG);
}
/* step 1. compute max */
// thread local max
float val[token_per_reduce][ele_per_thread];
float l_max[token_per_reduce];
for (int i = 0; i < token_per_reduce; i++) {
l_max[i] = REDUCE_FLOAT_INF_NEG;
for (int j = 0; j < ele_per_thread; j++) {
if (attn_mask) {
val[i][j] = (float)inp_val[i][j] + (float)mval[j];
} else {
if (mask_future && ele_per_thread * threadIdx.x + j > token_id + i) {
val[i][j] = REDUCE_FLOAT_INF_NEG;
} else {
val[i][j] = (float)inp_val[i][j];
}
}
l_max[i] = fmaxf(l_max[i], val[i][j]);
}
}
// warp reduce max
warpReduce<ReduceType::kMax, token_per_reduce>(l_max);
/* step 2. compute sum */
// thread local sum
float l_sum[token_per_reduce];
for (int i = 0; i < token_per_reduce; i++) {
l_sum[i] = 0.f;
for (int j = 0; j < ele_per_thread; j++) {
val[i][j] = __expf(val[i][j] - l_max[i]);
l_sum[i] += val[i][j];
}
}
// warp reduce sum
warpReduce<ReduceType::kSum, token_per_reduce>(l_sum);
/* step 3. compute final result */
for (int i = 0; i < token_per_reduce && (token_id + i) < from_len; i++) {
l_sum[i] = __fdividef(1.0f, l_sum[i] + EPSILON);
for (int j = 0; j < ele_per_thread; j++) {
inp_val[i][j] = (T)(val[i][j] * l_sum[i]);
}
BlockStore(ts_store).Store(inp + (token_id + i) * to_len, inp_val[i],
to_len);
}
} // blockIdx.x
}
/*
attn_mask!=nullptr for enc-self-attn and enc-dec-attn
attn_mask=nullptr and mask_future=true for dec-self-attn training
attn_mask=nullptr and mask_future=false for dec-self-attn infer
*/
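/* Added note (not in the original source): in the launchers below, block_dim and
   ele_per_thread are chosen so that block_dim * ele_per_thread >= to_len (e.g.
   to_len <= 128 dispatches <64, 2>), so a single BlockLoad covers one full
   attention row; grid_dim.x only controls how many rows each block iterates
   over. */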
template <>
void launch_attn_softmax<float>(float *inp, const float *attn_mask,
int batch_size, int nhead, int from_len,
int to_len, bool mask_future,
hipStream_t stream) {
dim3 grid_dim(1, batch_size, nhead);
if (to_len <= 32) {
hipLaunchKernelGGL(( ker_attn_softmax_lt32<float, 32, 1>), dim3(grid_dim), dim3(32), 0, stream,
inp, attn_mask, from_len, to_len, mask_future);
} else if (to_len <= 64) {
hipLaunchKernelGGL(( ker_attn_softmax_lt32<float, 32, 2>), dim3(grid_dim), dim3(32), 0, stream,
inp, attn_mask, from_len, to_len, mask_future);
} else if (to_len <= 128) {
grid_dim.x = 16;
hipLaunchKernelGGL(( ker_attn_softmax<float, 64, 2>), dim3(grid_dim), dim3(64), 0, stream,
inp, attn_mask, from_len, to_len, mask_future);
} else if (to_len <= 256) {
grid_dim.x = 32;
hipLaunchKernelGGL(( ker_attn_softmax<float, 128, 2>), dim3(grid_dim), dim3(128), 0, stream,
inp, attn_mask, from_len, to_len, mask_future);
} else if (to_len <= 512) {
grid_dim.x = 64;
hipLaunchKernelGGL(( ker_attn_softmax<float, 256, 2>), dim3(grid_dim), dim3(256), 0, stream,
inp, attn_mask, from_len, to_len, mask_future);
} else {
throw std::runtime_error(
"Sequence length greater than 512 is currently not supported");
}
}
template <>
void launch_attn_softmax<__half>(__half *inp, const __half *attn_mask,
int batch_size, int nhead, int from_len,
int to_len, bool mask_future,
hipStream_t stream) {
dim3 grid_dim(1, batch_size, nhead);
if (to_len <= 32) {
hipLaunchKernelGGL(( ker_attn_softmax_lt32<__half, 32, 1>), dim3(grid_dim), dim3(32), 0, stream,
inp, attn_mask, from_len, to_len, mask_future);
} else if (to_len <= 64) {
hipLaunchKernelGGL(( ker_attn_softmax_lt32<__half, 32, 2>), dim3(grid_dim), dim3(32), 0, stream,
inp, attn_mask, from_len, to_len, mask_future);
} else if (to_len <= 128) {
grid_dim.x = 8;
hipLaunchKernelGGL(( ker_attn_softmax<__half, 64, 2>), dim3(grid_dim), dim3(64), 0, stream,
inp, attn_mask, from_len, to_len, mask_future);
} else if (to_len <= 256) {
grid_dim.x = 16;
hipLaunchKernelGGL(( ker_attn_softmax<__half, 128, 2>), dim3(grid_dim), dim3(128), 0, stream,
inp, attn_mask, from_len, to_len, mask_future);
} else if (to_len <= 512) {
grid_dim.x = 32;
hipLaunchKernelGGL(( ker_attn_softmax<__half, 256, 2>), dim3(grid_dim), dim3(256), 0, stream,
inp, attn_mask, from_len, to_len, mask_future);
} else {
throw std::runtime_error(
"Sequence length greater than 512 is currently not supported");
}
}
/**
@brief: ker_attn_softmax_bw
Softmax backward in self attention.
@thread
gridDim.x = batch_size * nhead * seq_len / warps_per_block
blockDim.x = WARP_SIZE
blockDim.y = warps_per_block
@param
grad: [batch_size, nhead, seq_len, seq_len], output grad.
output: [batch_size, nhead, seq_len, seq_len], output of softmax forward.
*/
template <typename T, int ITERATIONS>
__global__ void ker_attn_softmax_bw(T *grad, const T *inp, int softmax_length) {
int batch_idx = blockIdx.x * blockDim.y + threadIdx.y;
int offset = batch_idx * softmax_length + threadIdx.x;
grad += offset;
inp += offset;
T grad_reg[ITERATIONS];
T inp_reg[ITERATIONS];
float sum = 0.0;
#pragma unroll
for (int i = 0; i < ITERATIONS; ++i) {
int curr_idx = threadIdx.x + i * WARP_SIZE;
if (curr_idx < softmax_length) {
grad_reg[i] = grad[i * WARP_SIZE];
inp_reg[i] = inp[i * WARP_SIZE];
sum += (float)grad_reg[i] * (float)inp_reg[i];
}
}
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i);
#pragma unroll
for (int i = 0; i < ITERATIONS; ++i) {
int curr_idx = threadIdx.x + i * WARP_SIZE;
if (curr_idx < softmax_length)
grad[i * WARP_SIZE] = (T)((float)inp_reg[i] * ((float)grad_reg[i] - sum));
}
}
template <typename T>
void launch_attn_softmax_bw(T *out_grad, const T *soft_inp, int rows,
int softmax_len, hipStream_t stream) {
const int warps_per_block = 4;
// rows = batch_size * nhead * from_len
dim3 grid_dim(rows / warps_per_block);
dim3 block_dim(WARP_SIZE, warps_per_block);
if (softmax_len <= 32)
hipLaunchKernelGGL(( ker_attn_softmax_bw<T, 1>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, softmax_len);
else if (softmax_len <= 64)
hipLaunchKernelGGL(( ker_attn_softmax_bw<T, 2>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, softmax_len);
else if (softmax_len <= 128)
hipLaunchKernelGGL(( ker_attn_softmax_bw<T, 4>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, softmax_len);
else if (softmax_len <= 256)
hipLaunchKernelGGL(( ker_attn_softmax_bw<T, 8>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, softmax_len);
else if (softmax_len <= 384)
hipLaunchKernelGGL(( ker_attn_softmax_bw<T, 12>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, softmax_len);
else if (softmax_len <= 512)
hipLaunchKernelGGL(( ker_attn_softmax_bw<T, 16>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, softmax_len);
else if (softmax_len <= 768)
hipLaunchKernelGGL(( ker_attn_softmax_bw<T, 24>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, softmax_len);
else if (softmax_len <= 1024)
hipLaunchKernelGGL(( ker_attn_softmax_bw<T, 32>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, softmax_len);
else if (softmax_len <= 2048)
hipLaunchKernelGGL(( ker_attn_softmax_bw<T, 64>)
, dim3(grid_dim), dim3(block_dim), 0, stream, out_grad, soft_inp, softmax_len);
else
throw std::runtime_error(
std::string(
"Special sequence length found in softmax backward, seq_len: ") +
std::to_string(softmax_len));
}
template void launch_attn_softmax_bw<__half>(__half *out_grad,
const __half *soft_inp, int rows,
int softmax_len,
hipStream_t stream);
template void launch_attn_softmax_bw<float>(float *out_grad,
const float *soft_inp, int rows,
int softmax_len,
hipStream_t stream);
| c233a4d9aa07f77082a40a5be15c26fb412e1979.cu | #include <cooperative_groups.h>
#include <math.h>
#include <cub/block/block_load.cuh>
#include <cub/cub.cuh>
#include "block_reduce.h"
#include "kernels.h"
namespace cg = cooperative_groups;
const float EPSILON = 1e-8f;
/**
@brief: softmax_kernel
Softmax forward kernel for
enc-self-attn, dec-self-attn, encdec-attn
@thread
gridDim.x = dynamic
gridDim.y = batch_size
gridDim.z = nhead
blockDim.x = from_len
@param
inp: [batch_size, nhead, from_len, to_len], softmax input.
attn_mask: [batch_size, to_len], padding tokens are -inf,
non padding tokens are 0.
attn_mask!=nullptr for enc-self-attn and enc-dec-attn
attn_mask=nullptr and mask_future=true for dec-self-attn training
attn_mask=nullptr and mask_future=false for dec-self-attn infer
*/
template <typename T, int block_dim, int ele_per_thread>
__global__ void ker_attn_softmax(T *inp, const T *attn_mask, int from_len,
int to_len, bool mask_future) {
int batch_id = blockIdx.y;
int head_id = blockIdx.z;
const int nhead = gridDim.z;
const int token_per_reduce = 1;
typedef cub::BlockLoad<T, block_dim, ele_per_thread,
cub::BLOCK_LOAD_VECTORIZE>
BlockLoad;
__shared__ typename BlockLoad::TempStorage ts_load;
typedef cub::BlockStore<T, block_dim, ele_per_thread,
cub::BLOCK_STORE_VECTORIZE>
BlockStore;
__shared__ typename BlockStore::TempStorage ts_store;
T mval[ele_per_thread];
if (attn_mask) {
attn_mask += batch_id * to_len;
BlockLoad(ts_load).Load(attn_mask, mval, to_len, REDUCE_FLOAT_INF_NEG);
}
inp += flat_3dim(batch_id, head_id, 0, nhead, from_len * to_len);
for (int token_id = blockIdx.x * token_per_reduce; token_id < from_len;
token_id += gridDim.x * token_per_reduce) {
T inp_val[token_per_reduce][ele_per_thread];
for (int i = 0; i < token_per_reduce && (token_id + i) < from_len; i++) {
BlockLoad(ts_load).Load(inp + (token_id + i) * to_len, inp_val[i], to_len,
REDUCE_FLOAT_INF_NEG);
}
/* step 1. compute max */
// thread local max
float val[token_per_reduce][ele_per_thread];
float l_max[token_per_reduce];
for (int i = 0; i < token_per_reduce; i++) {
l_max[i] = REDUCE_FLOAT_INF_NEG;
for (int j = 0; j < ele_per_thread; j++) {
if (attn_mask) {
val[i][j] = (float)inp_val[i][j] + (float)mval[j];
} else {
if (mask_future && ele_per_thread * threadIdx.x + j > token_id + i) {
val[i][j] = REDUCE_FLOAT_INF_NEG;
} else {
val[i][j] = (float)inp_val[i][j];
}
}
l_max[i] = fmaxf(l_max[i], val[i][j]);
}
}
// block reduce max
blockReduce<ReduceType::kMax, token_per_reduce>(l_max);
// write shared
__shared__ float s_max[token_per_reduce];
if (threadIdx.x == 0) {
for (int i = 0; i < token_per_reduce; i++) {
s_max[i] = l_max[i];
}
}
__syncthreads();
/* step 2. compute sum */
// thread local sum
float l_sum[token_per_reduce];
for (int i = 0; i < token_per_reduce; i++) {
l_sum[i] = 0.f;
for (int j = 0; j < ele_per_thread; j++) {
val[i][j] = __expf(val[i][j] - s_max[i]);
l_sum[i] += val[i][j];
}
}
// block reduce sum
blockReduce<ReduceType::kSum, token_per_reduce>(l_sum);
// write shared
__shared__ float s_sum[token_per_reduce];
if (threadIdx.x == 0) {
for (int i = 0; i < token_per_reduce; i++) {
s_sum[i] = __fdividef(1.0f, l_sum[i] + EPSILON);
}
}
__syncthreads();
/* step 3. compute final result */
for (int i = 0; i < token_per_reduce && (token_id + i) < from_len; i++) {
for (int j = 0; j < ele_per_thread; j++) {
inp_val[i][j] = (T)(val[i][j] * s_sum[i]);
}
BlockStore(ts_store).Store(inp + (token_id + i) * to_len, inp_val[i],
to_len);
}
} // blockIdx.x
}
template <typename T, int block_dim, int ele_per_thread>
__global__ void ker_attn_softmax_lt32(T *inp, const T *attn_mask, int from_len,
int to_len, bool mask_future) {
int batch_id = blockIdx.y;
int head_id = blockIdx.z;
const int nhead = gridDim.z;
const int token_per_reduce = 1;
typedef cub::BlockLoad<T, block_dim, ele_per_thread,
cub::BLOCK_LOAD_VECTORIZE>
BlockLoad;
__shared__ typename BlockLoad::TempStorage ts_load;
typedef cub::BlockStore<T, block_dim, ele_per_thread,
cub::BLOCK_STORE_VECTORIZE>
BlockStore;
__shared__ typename BlockStore::TempStorage ts_store;
T mval[ele_per_thread];
if (attn_mask) {
attn_mask += batch_id * to_len;
BlockLoad(ts_load).Load(attn_mask, mval, to_len, REDUCE_FLOAT_INF_NEG);
}
inp += flat_3dim(batch_id, head_id, 0, nhead, from_len * to_len);
for (int token_id = blockIdx.x * token_per_reduce; token_id < from_len;
token_id += gridDim.x * token_per_reduce) {
T inp_val[token_per_reduce][ele_per_thread];
for (int i = 0; i < token_per_reduce && (token_id + i) < from_len; i++) {
BlockLoad(ts_load).Load(inp + (token_id + i) * to_len, inp_val[i], to_len,
REDUCE_FLOAT_INF_NEG);
}
/* step 1. compute max */
// thread local max
float val[token_per_reduce][ele_per_thread];
float l_max[token_per_reduce];
for (int i = 0; i < token_per_reduce; i++) {
l_max[i] = REDUCE_FLOAT_INF_NEG;
for (int j = 0; j < ele_per_thread; j++) {
if (attn_mask) {
val[i][j] = (float)inp_val[i][j] + (float)mval[j];
} else {
if (mask_future && ele_per_thread * threadIdx.x + j > token_id + i) {
val[i][j] = REDUCE_FLOAT_INF_NEG;
} else {
val[i][j] = (float)inp_val[i][j];
}
}
l_max[i] = fmaxf(l_max[i], val[i][j]);
}
}
// warp reduce max
warpReduce<ReduceType::kMax, token_per_reduce>(l_max);
/* step 2. compute sum */
// thread local sum
float l_sum[token_per_reduce];
for (int i = 0; i < token_per_reduce; i++) {
l_sum[i] = 0.f;
for (int j = 0; j < ele_per_thread; j++) {
val[i][j] = __expf(val[i][j] - l_max[i]);
l_sum[i] += val[i][j];
}
}
// warp reduce sum
warpReduce<ReduceType::kSum, token_per_reduce>(l_sum);
/* step 3. compute final result */
for (int i = 0; i < token_per_reduce && (token_id + i) < from_len; i++) {
l_sum[i] = __fdividef(1.0f, l_sum[i] + EPSILON);
for (int j = 0; j < ele_per_thread; j++) {
inp_val[i][j] = (T)(val[i][j] * l_sum[i]);
}
BlockStore(ts_store).Store(inp + (token_id + i) * to_len, inp_val[i],
to_len);
}
} // blockIdx.x
}
/*
attn_mask!=nullptr for enc-self-attn and enc-dec-attn
attn_mask=nullptr and mask_future=true for dec-self-attn training
attn_mask=nullptr and mask_future=false for dec-self-attn infer
*/
template <>
void launch_attn_softmax<float>(float *inp, const float *attn_mask,
int batch_size, int nhead, int from_len,
int to_len, bool mask_future,
cudaStream_t stream) {
dim3 grid_dim(1, batch_size, nhead);
if (to_len <= 32) {
ker_attn_softmax_lt32<float, 32, 1><<<grid_dim, 32, 0, stream>>>(
inp, attn_mask, from_len, to_len, mask_future);
} else if (to_len <= 64) {
ker_attn_softmax_lt32<float, 32, 2><<<grid_dim, 32, 0, stream>>>(
inp, attn_mask, from_len, to_len, mask_future);
} else if (to_len <= 128) {
grid_dim.x = 16;
ker_attn_softmax<float, 64, 2><<<grid_dim, 64, 0, stream>>>(
inp, attn_mask, from_len, to_len, mask_future);
} else if (to_len <= 256) {
grid_dim.x = 32;
ker_attn_softmax<float, 128, 2><<<grid_dim, 128, 0, stream>>>(
inp, attn_mask, from_len, to_len, mask_future);
} else if (to_len <= 512) {
grid_dim.x = 64;
ker_attn_softmax<float, 256, 2><<<grid_dim, 256, 0, stream>>>(
inp, attn_mask, from_len, to_len, mask_future);
} else {
throw std::runtime_error(
"Sequence length greater than 512 is currently not supported");
}
}
template <>
void launch_attn_softmax<__half>(__half *inp, const __half *attn_mask,
int batch_size, int nhead, int from_len,
int to_len, bool mask_future,
cudaStream_t stream) {
dim3 grid_dim(1, batch_size, nhead);
if (to_len <= 32) {
ker_attn_softmax_lt32<__half, 32, 1><<<grid_dim, 32, 0, stream>>>(
inp, attn_mask, from_len, to_len, mask_future);
} else if (to_len <= 64) {
ker_attn_softmax_lt32<__half, 32, 2><<<grid_dim, 32, 0, stream>>>(
inp, attn_mask, from_len, to_len, mask_future);
} else if (to_len <= 128) {
grid_dim.x = 8;
ker_attn_softmax<__half, 64, 2><<<grid_dim, 64, 0, stream>>>(
inp, attn_mask, from_len, to_len, mask_future);
} else if (to_len <= 256) {
grid_dim.x = 16;
ker_attn_softmax<__half, 128, 2><<<grid_dim, 128, 0, stream>>>(
inp, attn_mask, from_len, to_len, mask_future);
} else if (to_len <= 512) {
grid_dim.x = 32;
ker_attn_softmax<__half, 256, 2><<<grid_dim, 256, 0, stream>>>(
inp, attn_mask, from_len, to_len, mask_future);
} else {
throw std::runtime_error(
"Sequence length greater than 512 is currently not supported");
}
}
/**
@brief: ker_attn_softmax_bw
Softmax backward in self attention.
@thread
gridDim.x = batch_size * nhead * seq_len / warps_per_block
blockDim.x = WARP_SIZE
blockDim.y = warps_per_block
@param
grad: [batch_size, nhead, seq_len, seq_len], output grad.
output: [batch_size, nhead, seq_len, seq_len], output of softmax forward.
*/
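// Added note (not in the original source): the kernel below applies the standard
// softmax Jacobian-vector product. With y = softmax(x) and upstream gradient g,
// dL/dx_i = y_i * (g_i - sum_j g_j * y_j); `sum` holds that row-wise dot product,
// reduced across the warp with shfl_xor.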
template <typename T, int ITERATIONS>
__global__ void ker_attn_softmax_bw(T *grad, const T *inp, int softmax_length) {
int batch_idx = blockIdx.x * blockDim.y + threadIdx.y;
int offset = batch_idx * softmax_length + threadIdx.x;
grad += offset;
inp += offset;
T grad_reg[ITERATIONS];
T inp_reg[ITERATIONS];
float sum = 0.0;
#pragma unroll
for (int i = 0; i < ITERATIONS; ++i) {
int curr_idx = threadIdx.x + i * WARP_SIZE;
if (curr_idx < softmax_length) {
grad_reg[i] = grad[i * WARP_SIZE];
inp_reg[i] = inp[i * WARP_SIZE];
sum += (float)grad_reg[i] * (float)inp_reg[i];
}
}
cg::thread_block b = cg::this_thread_block();
cg::thread_block_tile<WARP_SIZE> g = cg::tiled_partition<WARP_SIZE>(b);
for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i);
#pragma unroll
for (int i = 0; i < ITERATIONS; ++i) {
int curr_idx = threadIdx.x + i * WARP_SIZE;
if (curr_idx < softmax_length)
grad[i * WARP_SIZE] = (T)((float)inp_reg[i] * ((float)grad_reg[i] - sum));
}
}
template <typename T>
void launch_attn_softmax_bw(T *out_grad, const T *soft_inp, int rows,
int softmax_len, cudaStream_t stream) {
const int warps_per_block = 4;
// rows = batch_size * nhead * from_len
dim3 grid_dim(rows / warps_per_block);
dim3 block_dim(WARP_SIZE, warps_per_block);
if (softmax_len <= 32)
ker_attn_softmax_bw<T, 1>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, softmax_len);
else if (softmax_len <= 64)
ker_attn_softmax_bw<T, 2>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, softmax_len);
else if (softmax_len <= 128)
ker_attn_softmax_bw<T, 4>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, softmax_len);
else if (softmax_len <= 256)
ker_attn_softmax_bw<T, 8>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, softmax_len);
else if (softmax_len <= 384)
ker_attn_softmax_bw<T, 12>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, softmax_len);
else if (softmax_len <= 512)
ker_attn_softmax_bw<T, 16>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, softmax_len);
else if (softmax_len <= 768)
ker_attn_softmax_bw<T, 24>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, softmax_len);
else if (softmax_len <= 1024)
ker_attn_softmax_bw<T, 32>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, softmax_len);
else if (softmax_len <= 2048)
ker_attn_softmax_bw<T, 64>
<<<grid_dim, block_dim, 0, stream>>>(out_grad, soft_inp, softmax_len);
else
throw std::runtime_error(
std::string(
"Special sequence length found in softmax backward, seq_len: ") +
std::to_string(softmax_len));
}
template void launch_attn_softmax_bw<__half>(__half *out_grad,
const __half *soft_inp, int rows,
int softmax_len,
cudaStream_t stream);
template void launch_attn_softmax_bw<float>(float *out_grad,
const float *soft_inp, int rows,
int softmax_len,
cudaStream_t stream);
|
6a7fc89bad987cb13b2b4491029b383c504c724e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "upsample_wsl_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
inline __device__ int idx(
const int n,
const int num_channels,
const int c,
const int height,
const int width,
const int y,
const int x) {
return ((n * num_channels + c) * height + y) * width + x;
}
// input is X, output is Y
__global__ void UpsampleBilinearKernel(
const int output_size,
const int num_channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const float height_scale,
const float width_scale,
const float* X,
float* Y) {
CUDA_1D_KERNEL_LOOP(index, output_size) {
int indexTemp = index;
const int out_x = indexTemp % output_width;
indexTemp /= output_width;
const int out_y = indexTemp % output_height;
indexTemp /= output_height;
const int c = indexTemp % num_channels;
indexTemp /= num_channels;
const int n = indexTemp;
const int in_y = fminf(out_y / height_scale, input_height - 1);
const int in_x = fminf(out_x / width_scale, input_width - 1);
const float rheight =
output_height > 1 ? (input_height - 1.f) / (output_height - 1.f) : 0.f;
const float rwidth =
output_width > 1 ? (input_width - 1.f) / (output_width - 1.f) : 0.f;
// Compute Y axis lambdas
const float h1r = rheight * out_y;
const int h1 = (int)h1r;
const int h1p = (h1 < input_height - 1) ? 1 : 0;
const float h1lambda = h1r - h1;
const float h0lambda = 1.f - h1lambda;
// Compute X axis lambdas
const float w1r = rwidth * out_x;
const int w1 = (int)w1r;
const int w1p = (w1 < input_width - 1) ? 1 : 0;
const float w1lambda = w1r - w1;
const float w0lambda = 1.f - w1lambda;
Y[index] =
(h0lambda *
(w0lambda *
X[idx(
n, num_channels, c, input_height, input_width, h1, w1)] +
w1lambda *
X[idx(
n,
num_channels,
c,
input_height,
input_width,
h1,
w1 + w1p)]) +
h1lambda *
(w0lambda *
X[idx(
n,
num_channels,
c,
input_height,
input_width,
h1 + h1p,
w1)] +
w1lambda *
X[idx(
n,
num_channels,
c,
input_height,
input_width,
h1 + h1p,
w1 + w1p)]));
}
}
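// Added note (not in the original source): the expression above is standard
// bilinear interpolation. Since h0lambda = 1 - h1lambda and w0lambda = 1 - w1lambda,
// the four weights h0*w0, h0*w1, h1*w0 and h1*w1 sum to 1, so Y is a convex
// combination of the four input pixels around the sample point (h1r, w1r).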
// input is dY, output is dX
__global__ void UpsampleBilinearGradientKernel(
const int input_size,
const int num_channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const float height_scale,
const float width_scale,
const float* dY,
float* dX) {
CUDA_1D_KERNEL_LOOP(index, input_size) {
int indexTemp = index;
const int in_x = indexTemp % input_width;
indexTemp /= input_width;
const int in_y = indexTemp % input_height;
indexTemp /= input_height;
const int c = indexTemp % num_channels;
indexTemp /= num_channels;
const int n = indexTemp;
const int out_y = fminf(in_y / height_scale, output_height - 1);
const int out_x = fminf(in_x / width_scale, output_width - 1);
const float rheight =
output_height > 1 ? (output_height - 1.f) / (input_height - 1.f) : 0.f;
const float rwidth =
output_width > 1 ? (output_width - 1.f) / (input_width - 1.f) : 0.f;
// Compute Y axis lambdas
const float h1r = rheight * in_y;
const int h1 = (int)h1r;
const int h1p = (h1 < output_height - 1) ? 1 : 0;
const float h1lambda = h1r - h1;
const float h0lambda = 1.f - h1lambda;
// Compute X axis lambdas
const float w1r = rwidth * in_x;
const int w1 = (int)w1r;
const int w1p = (w1 < output_width - 1) ? 1 : 0;
const float w1lambda = w1r - w1;
const float w0lambda = 1.f - w1lambda;
#if __CUDA_ARCH__ >= 350
const float dYi = __ldg(&dY[index]);
#else
const float dYi = dY[index];
#endif
atomicAdd(
&dX[idx(n, num_channels, c, output_height, output_width, h1, w1)],
h0lambda * w0lambda * dYi);
atomicAdd(
&dX[idx(n, num_channels, c, output_height, output_width, h1, w1 + w1p)],
h0lambda * w1lambda * dYi);
atomicAdd(
&dX[idx(n, num_channels, c, output_height, output_width, h1 + h1p, w1)],
h1lambda * w0lambda * dYi);
atomicAdd(
&dX[idx(
n,
num_channels,
c,
output_height,
output_width,
h1 + h1p,
w1 + w1p)],
h1lambda * w1lambda * dYi);
}
}
} // namespace
template <>
bool UpsampleBilinearWSLOp<float, CUDAContext>::RunOnDevice() {
const auto& X = Input(0);
const auto& Z = Input(1);
const auto inputDims = X.sizes();
CAFFE_ENFORCE_EQ(4, inputDims.size());
const int batch_size = X.dim32(0), num_channels = X.dim32(1),
input_height = X.dim32(2), input_width = X.dim32(3);
//int output_width = input_width * width_scale_;
//int output_height = input_height * height_scale_;
int output_width = Z.dim32(3);
int output_height = Z.dim32(2);
width_scale_ = output_width / input_width;
height_scale_ = output_height / input_height;
auto* Y = Output(
0,
{batch_size, num_channels, output_height, output_width},
at::dtype<float>());
const auto size = Y->numel();
hipLaunchKernelGGL(( UpsampleBilinearKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
size,
num_channels,
input_height,
input_width,
output_height,
output_width,
height_scale_,
width_scale_,
X.data<float>(),
Y->template mutable_data<float>());
return true;
}
template <>
bool UpsampleBilinearWSLGradientOp<float, CUDAContext>::RunOnDevice() {
const auto& dY = Input(0);
const auto& X = Input(1);
const auto inputDims = dY.sizes();
CAFFE_ENFORCE_EQ(4, inputDims.size());
const int batch_size = dY.dim32(0);
const int num_channels = dY.dim32(1);
const int input_height = dY.dim32(2);
const int input_width = dY.dim32(3);
const int output_height = X.dim32(2);
const int output_width = X.dim32(3);
width_scale_ = input_width / output_width;
height_scale_ = input_height / output_height;
auto* dX = Output(
0,
{batch_size, num_channels, output_height, output_width},
at::dtype<float>());
math::Set<float, CUDAContext>(
dX->numel(), 0.0f, dX->mutable_data<float>(), &context_);
const auto size = dY.numel();
hipLaunchKernelGGL(( UpsampleBilinearGradientKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
size,
num_channels,
input_height,
input_width,
output_height,
output_width,
height_scale_,
width_scale_,
dY.data<float>(),
dX->template mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(
UpsampleBilinearWSL,
UpsampleBilinearWSLOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
UpsampleBilinearWSLGradient,
UpsampleBilinearWSLGradientOp<float, CUDAContext>);
} // namespace caffe2
| 6a7fc89bad987cb13b2b4491029b383c504c724e.cu | #include "caffe2/core/context_gpu.h"
#include "upsample_wsl_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
inline __device__ int idx(
const int n,
const int num_channels,
const int c,
const int height,
const int width,
const int y,
const int x) {
return ((n * num_channels + c) * height + y) * width + x;
}
// input is X, output is Y
__global__ void UpsampleBilinearKernel(
const int output_size,
const int num_channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const float height_scale,
const float width_scale,
const float* X,
float* Y) {
CUDA_1D_KERNEL_LOOP(index, output_size) {
int indexTemp = index;
const int out_x = indexTemp % output_width;
indexTemp /= output_width;
const int out_y = indexTemp % output_height;
indexTemp /= output_height;
const int c = indexTemp % num_channels;
indexTemp /= num_channels;
const int n = indexTemp;
const int in_y = fminf(out_y / height_scale, input_height - 1);
const int in_x = fminf(out_x / width_scale, input_width - 1);
const float rheight =
output_height > 1 ? (input_height - 1.f) / (output_height - 1.f) : 0.f;
const float rwidth =
output_width > 1 ? (input_width - 1.f) / (output_width - 1.f) : 0.f;
// Compute Y axis lambdas
const float h1r = rheight * out_y;
const int h1 = (int)h1r;
const int h1p = (h1 < input_height - 1) ? 1 : 0;
const float h1lambda = h1r - h1;
const float h0lambda = 1.f - h1lambda;
// Compute X axis lambdas
const float w1r = rwidth * out_x;
const int w1 = (int)w1r;
const int w1p = (w1 < input_width - 1) ? 1 : 0;
const float w1lambda = w1r - w1;
const float w0lambda = 1.f - w1lambda;
Y[index] =
(h0lambda *
(w0lambda *
X[idx(
n, num_channels, c, input_height, input_width, h1, w1)] +
w1lambda *
X[idx(
n,
num_channels,
c,
input_height,
input_width,
h1,
w1 + w1p)]) +
h1lambda *
(w0lambda *
X[idx(
n,
num_channels,
c,
input_height,
input_width,
h1 + h1p,
w1)] +
w1lambda *
X[idx(
n,
num_channels,
c,
input_height,
input_width,
h1 + h1p,
w1 + w1p)]));
}
}
// input is dY, output is dX
__global__ void UpsampleBilinearGradientKernel(
const int input_size,
const int num_channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const float height_scale,
const float width_scale,
const float* dY,
float* dX) {
CUDA_1D_KERNEL_LOOP(index, input_size) {
int indexTemp = index;
const int in_x = indexTemp % input_width;
indexTemp /= input_width;
const int in_y = indexTemp % input_height;
indexTemp /= input_height;
const int c = indexTemp % num_channels;
indexTemp /= num_channels;
const int n = indexTemp;
const int out_y = fminf(in_y / height_scale, output_height - 1);
const int out_x = fminf(in_x / width_scale, output_width - 1);
const float rheight =
output_height > 1 ? (output_height - 1.f) / (input_height - 1.f) : 0.f;
const float rwidth =
output_width > 1 ? (output_width - 1.f) / (input_width - 1.f) : 0.f;
// Compute Y axis lambdas
const float h1r = rheight * in_y;
const int h1 = (int)h1r;
const int h1p = (h1 < output_height - 1) ? 1 : 0;
const float h1lambda = h1r - h1;
const float h0lambda = 1.f - h1lambda;
// Compute X axis lambdas
const float w1r = rwidth * in_x;
const int w1 = (int)w1r;
const int w1p = (w1 < output_width - 1) ? 1 : 0;
const float w1lambda = w1r - w1;
const float w0lambda = 1.f - w1lambda;
#if __CUDA_ARCH__ >= 350
const float dYi = __ldg(&dY[index]);
#else
const float dYi = dY[index];
#endif
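    // Several dY elements can map to the same dX cell, so the four bilinear
    // contributions are scattered with atomicAdd rather than plain stores.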
atomicAdd(
&dX[idx(n, num_channels, c, output_height, output_width, h1, w1)],
h0lambda * w0lambda * dYi);
atomicAdd(
&dX[idx(n, num_channels, c, output_height, output_width, h1, w1 + w1p)],
h0lambda * w1lambda * dYi);
atomicAdd(
&dX[idx(n, num_channels, c, output_height, output_width, h1 + h1p, w1)],
h1lambda * w0lambda * dYi);
atomicAdd(
&dX[idx(
n,
num_channels,
c,
output_height,
output_width,
h1 + h1p,
w1 + w1p)],
h1lambda * w1lambda * dYi);
}
}
} // namespace
template <>
bool UpsampleBilinearWSLOp<float, CUDAContext>::RunOnDevice() {
const auto& X = Input(0);
const auto& Z = Input(1);
const auto inputDims = X.sizes();
CAFFE_ENFORCE_EQ(4, inputDims.size());
const int batch_size = X.dim32(0), num_channels = X.dim32(1),
input_height = X.dim32(2), input_width = X.dim32(3);
//int output_width = input_width * width_scale_;
//int output_height = input_height * height_scale_;
int output_width = Z.dim32(3);
int output_height = Z.dim32(2);
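  // Note: the divisions below are integer divisions, so the stored scales are
  // exact only when the output size is an integer multiple of the input size.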
width_scale_ = output_width / input_width;
height_scale_ = output_height / input_height;
auto* Y = Output(
0,
{batch_size, num_channels, output_height, output_width},
at::dtype<float>());
const auto size = Y->numel();
UpsampleBilinearKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
size,
num_channels,
input_height,
input_width,
output_height,
output_width,
height_scale_,
width_scale_,
X.data<float>(),
Y->template mutable_data<float>());
return true;
}
template <>
bool UpsampleBilinearWSLGradientOp<float, CUDAContext>::RunOnDevice() {
const auto& dY = Input(0);
const auto& X = Input(1);
const auto inputDims = dY.sizes();
CAFFE_ENFORCE_EQ(4, inputDims.size());
const int batch_size = dY.dim32(0);
const int num_channels = dY.dim32(1);
const int input_height = dY.dim32(2);
const int input_width = dY.dim32(3);
const int output_height = X.dim32(2);
const int output_width = X.dim32(3);
width_scale_ = input_width / output_width;
height_scale_ = input_height / output_height;
auto* dX = Output(
0,
{batch_size, num_channels, output_height, output_width},
at::dtype<float>());
math::Set<float, CUDAContext>(
dX->numel(), 0.0f, dX->mutable_data<float>(), &context_);
const auto size = dY.numel();
UpsampleBilinearGradientKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
size,
num_channels,
input_height,
input_width,
output_height,
output_width,
height_scale_,
width_scale_,
dY.data<float>(),
dX->template mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(
UpsampleBilinearWSL,
UpsampleBilinearWSLOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
UpsampleBilinearWSLGradient,
UpsampleBilinearWSLGradientOp<float, CUDAContext>);
} // namespace caffe2
|
0d2d8073f2e251ca649396b069ca0cfc09acd721.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/********************************************************************
* cuda_2d_fdaco.cu
* This is an example of the CUDA program to calculate 2d acoustic
* wavefield using staggered-grid finite-difference like method with
* PML absorbing boundary condition.
*
* Scripted by: Long Guihua
* Initiated time: 2010/04/08
* Last modified: 2010/04/08
* Contact info: [email protected]
*
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_2d_fdaco.h"
#define BLOCK_DIMX 16 // tile (and threadblock) size in x
#define BLOCK_DIMY 16 // tile (and threadblock) size in y
#define radius 4 // length of difference coefficients
#define PI 3.1415926
__constant__ float c_coeff[radius];
__global__ void fwd_2dhb_stg_orderN(float *g_input, float *g_output, float *g_param, const int dimx,
const int dimy)
{
__shared__ float s_data[BLOCK_DIMY + 2 * radius][BLOCK_DIMX + 2 * radius];
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int tx = threadIdx.x + radius; // thread's x-index into corresponding shared memory tile
int ty = threadIdx.y + radius; // thread's y-index into corresponding shared memory tile
int in_idx = iy * dimx + ix; // index for reading input
// update the data in shared memory in halo part
if (ix < dimx && threadIdx.y < radius) // halo above/below
{
s_data[threadIdx.y][tx] = g_input[in_idx - radius * dimx];
s_data[threadIdx.y + BLOCK_DIMY + radius][tx] = g_input[in_idx + BLOCK_DIMY * dimx];
}
// if (iy > radius -1 && iy < dimy - radius && threadIdx.x < radius) // halo left/right
if (iy < dimy && threadIdx.x < radius) // halo left/right
{
s_data[ty][threadIdx.x] = g_input[in_idx - radius];
s_data[ty][threadIdx.x + BLOCK_DIMX + radius] = g_input[in_idx + BLOCK_DIMX];
}
// update the data in shared memory within BLOCKED part
s_data[ty][tx] = g_input[in_idx];
__syncthreads();
// compute the output value
float temp = 0.0f;
for (int ic = 0; ic < radius; ic++)
temp += c_coeff[ic]* (s_data[ty][tx + ic] - s_data[ty][tx - ic -1]);
g_output[in_idx] = temp * g_param[in_idx];
}
__global__ void fwd_2dhf_stg_orderN(float *g_input, float *g_output, float *g_param, const int dimx,
const int dimy)
{
__shared__ float s_data[BLOCK_DIMY + 2 * radius][BLOCK_DIMX + 2 * radius];
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int tx = threadIdx.x + radius; // thread's x-index into corresponding shared memory tile
int ty = threadIdx.y + radius; // thread's y-index into corresponding shared memory tile
int in_idx = iy * dimx + ix; // index for reading input
// update the data in shared memory in halo part
if (ix < dimx && threadIdx.y < radius) // halo above/below
{
s_data[threadIdx.y][tx] = g_input[in_idx - radius * dimx];
s_data[threadIdx.y + BLOCK_DIMY + radius][tx] = g_input[in_idx + BLOCK_DIMY * dimx];
}
// if (iy > radius -1 && iy < dimy - radius && threadIdx.x < radius) // halo left/right
if (iy < dimy && threadIdx.x < radius) // halo left/right
{
s_data[ty][threadIdx.x] = g_input[in_idx - radius];
s_data[ty][threadIdx.x + BLOCK_DIMX + radius] = g_input[in_idx + BLOCK_DIMX];
}
// update the data in shared memory within BLOCKED part
s_data[ty][tx] = g_input[in_idx];
__syncthreads();
// compute the output value
float temp = 0.0f;
for (int ic = 0; ic < radius; ic++)
temp += c_coeff[ic] * (s_data[ty][tx + ic + 1] - s_data[ty][tx - ic]);
g_output[in_idx] = temp * g_param[in_idx];
}
__global__ void fwd_2dvb_stg_orderN(float *g_input, float *g_output, float *g_param, const int dimx,
const int dimy)
{
__shared__ float s_data[BLOCK_DIMY + 2 * radius][BLOCK_DIMX + 2 * radius];
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int tx = threadIdx.x + radius; // thread's x-index into corresponding shared memory tile
int ty = threadIdx.y + radius; // thread's y-index into corresponding shared memory tile
int in_idx = iy * dimx + ix; // index for reading input
// update the data in shared memory in halo part
// if (ix > radius - 1 && ix < dimx - radius && threadIdx.y < radius) // halo above/below
if (ix < dimx && threadIdx.y < radius) // halo above/below
{
s_data[threadIdx.y][tx] = g_input[in_idx - radius * dimx];
s_data[threadIdx.y + BLOCK_DIMY + radius][tx] = g_input[in_idx + BLOCK_DIMY * dimx];
}
if (iy < dimy && threadIdx.x < radius) // halo left/right
{
s_data[ty][threadIdx.x] = g_input[in_idx - radius];
s_data[ty][threadIdx.x + BLOCK_DIMX + radius] = g_input[in_idx + BLOCK_DIMX];
}
// update the data in shared memory within BLOCKED part
s_data[ty][tx] = g_input[in_idx];
__syncthreads();
// compute the output value
float temp = 0.0f;
for (int ic = 0; ic < radius; ic++)
temp += c_coeff[ic] * (s_data[ty + ic][tx] - s_data[ty - ic - 1][tx]);
g_output[in_idx] = temp * g_param[in_idx];
}
__global__ void fwd_2dvf_stg_orderN(float *g_input, float *g_output, float *g_param, const int dimx,
const int dimy)
{
__shared__ float s_data[BLOCK_DIMY + 2 * radius][BLOCK_DIMX + 2 * radius];
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int tx = threadIdx.x + radius; // thread's x-index into corresponding shared memory tile
int ty = threadIdx.y + radius; // thread's y-index into corresponding shared memory tile
int in_idx = iy * dimx + ix; // index for reading input
// update the data in shared memory in halo part
// if (ix > radius - 1 && ix < dimx - radius && threadIdx.y < radius) // halo above/below
if (ix < dimx && threadIdx.y < radius) // halo above/below
{
s_data[threadIdx.y][tx] = g_input[in_idx - radius * dimx];
s_data[threadIdx.y + BLOCK_DIMY + radius][tx] = g_input[in_idx + BLOCK_DIMY * dimx];
}
if (iy < dimy && threadIdx.x < radius) // halo left/right
{
s_data[ty][threadIdx.x] = g_input[in_idx - radius];
s_data[ty][threadIdx.x + BLOCK_DIMX + radius] = g_input[in_idx + BLOCK_DIMX];
}
// update the data in shared memory within BLOCKED part
s_data[ty][tx] = g_input[in_idx];
__syncthreads();
// compute the output value
float temp = 0.0f;
for (int ic = 0; ic < radius; ic++)
temp += c_coeff[ic]* (s_data[ty + ic + 1][tx] - s_data[ty - ic][tx]);
g_output[in_idx] = temp * g_param[in_idx];
}
__global__ void bd_fwd_2dhb_stg_orderN(float *g_input, float *g_output, float *g_param, const int dimx,
const int dimy)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int in_idx = iy * dimx + ix; // index for reading input
if (ix == 0) // set value 0.0f to the first column
g_output[in_idx] = 0.0f;
if (ix < radius && ix > 0 && iy < dimy ) // left boundary and backward
g_output[in_idx] = (g_input[in_idx] - g_input[in_idx - 1]) * g_param[in_idx];
if (ix > dimx - radius - 1 && ix < dimx && iy < dimy ) // right boundary and backward
g_output[in_idx] = (g_input[in_idx] - g_input[in_idx - 1]) * g_param[in_idx];
}
__global__ void bd_fwd_2dhf_stg_orderN(float *g_input, float *g_output, float *g_param, const int dimx,
const int dimy)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int in_idx = iy * dimx + ix; // index for reading input
if (ix < radius && iy < dimy ) // left boundary and forward
g_output[in_idx] = (g_input[in_idx + 1] - g_input[in_idx]) * g_param[in_idx];
if (ix > dimx - radius -1 && ix < dimx - 1 && iy < dimy ) // right boundary and forward
g_output[in_idx] = (g_input[in_idx + 1] - g_input[in_idx]) * g_param[in_idx];
if (ix == dimx - 1)
g_output[in_idx] = 0.0f;
}
__global__ void bd_fwd_2dvb_stg_orderN(float *g_input, float *g_output, float *g_param, const int dimx,
const int dimy)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int in_idx = iy * dimx + ix; // index for reading input
    if (iy == 0) // set value 0.0f to the first row
g_output[in_idx] = 0.0f;
    if (iy < radius && iy > 0 && ix < dimx ) // upper boundary and backward
g_output[in_idx] = (g_input[in_idx] - g_input[in_idx - dimx]) * g_param[in_idx];
    if (iy > dimy - radius - 1 && iy < dimy && ix < dimx ) // lower boundary and backward
g_output[in_idx] = (g_input[in_idx] - g_input[in_idx - dimx]) * g_param[in_idx];
}
__global__ void bd_fwd_2dvf_stg_orderN(float *g_input, float *g_output, float *g_param, const int dimx,
const int dimy)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int in_idx = iy * dimx + ix; // index for reading input
    if (iy < radius && ix < dimx ) // upper boundary and forward
g_output[in_idx] = (g_input[in_idx + dimx] - g_input[in_idx]) * g_param[in_idx];
    if (iy > dimy - radius -1 && iy < dimy - 1 && ix < dimx ) // lower boundary and forward
g_output[in_idx] = (g_input[in_idx + dimx] - g_input[in_idx]) * g_param[in_idx];
if (iy == dimy - 1)
g_output[in_idx] = 0.0f;
}
__global__ void AddSource(int nxe, int nze, float *d_taux, float *d_tauz, float *d_src, float *d_tau)
{
__shared__ float s_dtaux[BLOCK_DIMY][BLOCK_DIMX];
__shared__ float s_dtauz[BLOCK_DIMY][BLOCK_DIMX];
__shared__ float s_dsrc[BLOCK_DIMY][BLOCK_DIMX];
int ix = blockDim.x * blockIdx.x + threadIdx.x;
int iy = blockDim.y * blockIdx.y + threadIdx.y;
int idx = iy * nxe + ix;
if (ix < nxe && iy < nze)
{
s_dtaux[threadIdx.y][threadIdx.x] = d_taux[idx];
s_dtauz[threadIdx.y][threadIdx.x] = d_tauz[idx];
s_dsrc[threadIdx.y][threadIdx.x] = d_src[idx];
}
__syncthreads();
d_tau[idx] = s_dtaux[threadIdx.y][threadIdx.x] + s_dtauz[threadIdx.y][threadIdx.x] + s_dsrc[threadIdx.y][threadIdx.x];
}
__global__ void MatMulAdd_PerElem(int nxe, int nze, float *c, float *a, float *abar, float *b, float alpha)
{
__shared__ float s_a[BLOCK_DIMY][BLOCK_DIMX];
__shared__ float s_abar[BLOCK_DIMY][BLOCK_DIMX];
__shared__ float s_b[BLOCK_DIMY][BLOCK_DIMX];
__shared__ float s_c[BLOCK_DIMY][BLOCK_DIMX];
int ix = blockDim.x * blockIdx.x + threadIdx.x;
int iy = blockDim.y * blockIdx.y + threadIdx.y;
int idx = iy * nxe + ix;
if (ix < nxe && iy < nze)
{
s_a[threadIdx.y][threadIdx.x] = a[idx];
s_abar[threadIdx.y][threadIdx.x] = abar[idx];
s_b[threadIdx.y][threadIdx.x] = b[idx];
s_c[threadIdx.y][threadIdx.x] = c[idx];
}
__syncthreads();
c[idx] = s_a[threadIdx.y][threadIdx.x] * s_c[threadIdx.y][threadIdx.x]
+ s_abar[threadIdx.y][threadIdx.x] * s_b[threadIdx.y][threadIdx.x] * alpha;
}
void submodext(int nx, int nz, int *abc, float *rd, float *rv, float *rde, float *rve)
{
int ix, iz, nxe, nze;
nxe = nx+abc[0]+abc[2];
nze = nz+abc[1]+abc[3];
/* model kernel */
for (iz=abc[1]; iz<nz+abc[1]; iz++)
for (ix=abc[0]; ix<nx+abc[0]; ix++) {
rde[iz*nxe+ix] = rd[(iz-abc[1])*nx+ix-abc[0]];
rve[iz*nxe+ix] = rv[(iz-abc[1])*nx+ix-abc[0]];
}
/* left- and right-sides */
for (iz=abc[1]; iz<nz+abc[1]; iz++) {
for (ix=0; ix<abc[0]; ix++) {
rde[iz*nxe+ix] = rd[(iz-abc[1])*nx];
rve[iz*nxe+ix] = rv[(iz-abc[1])*nx];
}
for (ix=nx+abc[0]; ix<nxe; ix++) {
rde[iz*nxe+ix] = rd[(iz-abc[1])*nx+nx-1];
rve[iz*nxe+ix] = rv[(iz-abc[1])*nx+nx-1];
}
}
/* upper- and lower- sides */
for (ix=abc[0]; ix<nx+abc[0]; ix++) {
for (iz=0; iz<abc[1]; iz++) {
rde[iz*nxe+ix] = rd[ix-abc[0]];
rve[iz*nxe+ix] = rv[ix-abc[0]];
}
for (iz=nz+abc[1]; iz<nze; iz++) {
rde[iz*nxe+ix] = rd[(nz-1)*nx+ix-abc[0]];
rve[iz*nxe+ix] = rv[(nz-1)*nx+ix-abc[0]];
}
}
/* upper-left corner */
for (iz=0; iz<abc[1]; iz++)
for (ix=0; ix<abc[0]; ix++) {
rde[iz*nxe+ix] = rd[0];
rve[iz*nxe+ix] = rv[0];
}
/* upper-right corner */
for (iz=0; iz<abc[1]; iz++)
for (ix=nx+abc[0]; ix<nxe; ix++) {
rde[iz*nxe+ix] = rd[nx-1];
rve[iz*nxe+ix] = rv[nx-1];
}
/* lower-left corner */
for (iz=nz+abc[1]; iz<nze; iz++)
for (ix=0; ix<abc[0]; ix++) {
rde[iz*nxe+ix] = rd[(nz-1)*nx];
rve[iz*nxe+ix] = rv[(nz-1)*nx];
}
/* lower-right corner */
for (iz=nz+abc[1]; iz<nze; iz++)
for (ix=nx+abc[0]; ix<nxe; ix++) {
rde[iz*nxe+ix] = rd[(nz-1)*nx+nx-1];
rve[iz*nxe+ix] = rv[(nz-1)*nx+nx-1];
}
return;
}
void subpml(int nx, int nz, float dx, float dz, float R, int *nmpl, float *ve, float *qx1, float *qz1, float *qx2, float *qz2)
{
int i, j, nxe, nze;
float tmp, idx;
nxe = nx+nmpl[0]+nmpl[2];
nze = nz+nmpl[1]+nmpl[3];
for (j=0; j<nz; j++)
for (i=0; i<nx; i++) {
qx1[(j+nmpl[1])*nxe+i+nmpl[0]] = 0.0;
qx2[(j+nmpl[1])*nxe+i+nmpl[0]] = 0.0;
qz1[(j+nmpl[1])*nxe+i+nmpl[0]] = 0.0;
qz2[(j+nmpl[1])*nxe+i+nmpl[0]] = 0.0;
}
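    /* Quadratic PML damping profile: q = 3*v*ln(1/R)*(d/L)^2 / (2*L), where d is
       the distance into the absorbing layer, L its thickness, and R the target
       reflection coefficient; qx2/qz2 hold the half-grid-shifted profiles. */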
tmp = (float)nmpl[0]*dx;
for (j=0; j<nze; j++) /* left boundary */
for (i=0; i<nmpl[0]; i++) {
idx = (float)(nmpl[0]-i);
qx1[j*nxe+i] = 3.0f*ve[j*nxe+i]*logf(1.0f/R)*(idx*dx/tmp)*(idx*dx/tmp)/(2.0f*tmp);
qx2[j*nxe+i] = 3.0f*ve[j*nxe+i]*logf(1.0f/R)*((idx+0.5f)*dx/tmp)*((idx+0.5f)*dx/tmp)/(2.0f*tmp);
}
tmp = (float)nmpl[2]*dx;
for (j=0; j<nze; j++) /* right boundary */
for (i=nx+nmpl[0]; i<nxe; i++) {
idx = (float)(i+nmpl[2]+1-nxe);
qx1[j*nxe+i] = 3.0f*ve[j*nxe+i]*logf(1.0f/R)*(idx*dx/tmp)*(idx*dx/tmp)/(2.0f*tmp);
qx2[j*nxe+i] = 3.0f*ve[j*nxe+i]*logf(1.0f/R)*((idx+0.5f)*dx/tmp)*((idx+0.5f)*dx/tmp)/(2.0f*tmp);
}
tmp = (float)nmpl[1]*dz;
for (i=0; i<nxe; i++) /* upper boundary */
for (j=0; j<nmpl[1]; j++) {
idx = (float)(nmpl[1]-j);
qz1[j*nxe+i] = 3.0f*ve[j*nxe+i]*logf(1.0f/R)*(idx*dz/tmp)*(idx*dz/tmp)/(2.0f*tmp);
qz2[j*nxe+i] = 3.0f*ve[j*nxe+i]*logf(1.0f/R)*((idx+0.5f)*dz/tmp)*((idx+0.5f)*dz/tmp)/(2.0f*tmp);
}
tmp = (float)nmpl[3]*dz;
for (i=0; i<nxe; i++) /* lower boundary */
for (j=nz+nmpl[1]; j<nze; j++) {
idx = (float)(j+nmpl[3]+1-nze);
qz1[j*nxe+i] = 3.0f*ve[j*nxe+i]*logf(1.0f/R)*(idx*dz/tmp)*(idx*dz/tmp)/(2.0f*tmp);
qz2[j*nxe+i] = 3.0f*ve[j*nxe+i]*logf(1.0f/R)*((idx+0.5f)*dz/tmp)*((idx+0.5f)*dz/tmp)/(2.0f*tmp);
}
return;
}
void substager(int nxe, int nze, float *rde, float *rve, float *bux, float *buz, float *kappa)
{
int i, j;
for (j=1; j<nze-1; j++)
for (i=1; i<nxe-1; i++) {
bux[j*nxe+i] = 2.0f/(rde[j*nxe+i]+rde[j*nxe+i+1]);
buz[j*nxe+i] = 2.0f/(rde[j*nxe+i]+rde[(j+1)*nxe+i]);
kappa[j*nxe+i] = rde[j*nxe+i]*rve[j*nxe+i]*rve[j*nxe+i];
}
for (j=0; j<nze; j++) {
bux[j*nxe] = 1.0f/rde[j*nxe];
buz[j*nxe] = 1.0f/rde[j*nxe];
bux[j*nxe+nxe-1] = 1.0f/rde[j*nxe+nxe-1];
buz[j*nxe+nxe-1] = 1.0f/rde[j*nxe+nxe-1];
kappa[j*nxe] = rde[j*nxe]*rve[j*nxe]*rve[j*nxe];
kappa[j*nxe+nxe-1] = rde[j*nxe+nxe-1]*rve[j*nxe+nxe-1]*rve[j*nxe+nxe-1];
}
for (i=0; i<nxe; i++) {
bux[i] = 1.0f/rde[i];
buz[i] = 1.0f/rde[i];
bux[(nze-1)*nxe+i] = 1.0f/rde[(nze-1)*nxe+i];
buz[(nze-1)*nxe+i] = 1.0f/rde[(nze-1)*nxe+i];
kappa[i] = rde[i]*rve[i]*rve[i];
kappa[(nze-1)*nxe+i] = rde[(nze-1)*nxe+i]*rve[(nze-1)*nxe+i]*rve[(nze-1)*nxe+i];
}
return;
}
void wavelet(Source sour)
{
float t0 = 1.5f*sqrtf(6.0f)/((float)PI*sour.f0);
float t, da, da2;
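    /* iss selects the source time function: 1 = monochromatic cosine,
       2 = Ricker wavelet, 3 = Gaussian-derivative pulse, otherwise the
       time derivative of the Ricker wavelet. */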
for (int i=0; i<sour.ns; i++)
{
t = (float)i*sour.dt;
da = (float)PI*sour.f0*(t-t0);
da2 = da*da;
if (sour.iss == 1) sour.src[i] = cosf(2.0f*(float)PI*sour.f0*t);
else if (sour.iss == 2) sour.src[i] = (1.0f-2.0f*da2)*expf(-da2);
else if (sour.iss == 3) sour.src[i] = (t-t0)*expf(-da2);
else sour.src[i] = -4.0f*da*(float)PI*sour.f0*expf(-da2)
-2.0f*da*(float)PI*sour.f0*(1.0f-2.0f*da2)*expf(-da2);
}
return;
}
void forward(float t, Model model, Source sour, int *abc, float R, float tpoint, float *snap, float *seis)
{
// Extend model grids
int nxe = model.nx + abc[0] + abc[2];
int nze = model.nz + abc[1] + abc[3];
// Allocate memory for model parameters
float *rde = (float *)malloc(nxe * nze * sizeof(float));
float *rve = (float *)malloc(nxe * nze * sizeof(float));
float *kappa = (float *)malloc(nxe * nze * sizeof(float));
float *bux = (float *)malloc(nxe * nze * sizeof(float));
float *buz = (float *)malloc(nxe * nze * sizeof(float));
// Extend model grids
submodext(model.nx, model.nz, abc, model.rd, model.rv, rde, rve);
// Interpolate model parameters in-between grids
substager(nxe, nze, rde, rve, bux, buz, kappa);
// Load model parameters from host to device
float *d_bux;
hipMalloc((void **) &d_bux, nxe * nze * sizeof(float));
hipMemcpy(d_bux, bux, nxe * nze * sizeof(float), hipMemcpyHostToDevice);
float *d_buz;
hipMalloc((void **) &d_buz, nxe * nze * sizeof(float));
hipMemcpy(d_buz, buz, nxe * nze * sizeof(float), hipMemcpyHostToDevice);
float *d_kappa;
hipMalloc((void **) &d_kappa, nxe * nze * sizeof(float));
hipMemcpy(d_kappa, kappa, nxe * nze * sizeof(float), hipMemcpyHostToDevice);
// Free memory on host
free(bux); free(buz); free(kappa);
// Allocate memory for PML boundary matrices
float *pmlx = (float *)malloc(nxe * nze * sizeof(float));
float *pmlz = (float *)malloc(nxe * nze * sizeof(float));
float *pmlxh = (float *)malloc(nxe * nze * sizeof(float));
float *pmlzh = (float *)malloc(nxe * nze * sizeof(float));
// Generate PML boundary governing matrices
subpml(model.nx, model.nz, model.dx, model.dz, R, abc, rve, pmlx, pmlz, pmlxh, pmlzh);
// Define and generate temper matrices of PML boundary
float *tempx = (float *)malloc(nxe * nze * sizeof(float));
float *tempz = (float *)malloc(nxe * nze * sizeof(float));
float *tempxh = (float *)malloc(nxe * nze * sizeof(float));
float *tempzh = (float *)malloc(nxe * nze * sizeof(float));
float *tempx5 = (float *)malloc(nxe * nze * sizeof(float));
float *tempz5 = (float *)malloc(nxe * nze * sizeof(float));
float *tempxh5 = (float *)malloc(nxe * nze * sizeof(float));
float *tempzh5 = (float *)malloc(nxe * nze * sizeof(float));
for (int iz = 0; iz < nze; iz++)
{
for (int ix = 0; ix < nxe; ix++)
{
tempx[iz * nxe + ix] = expf(-sour.dt * pmlx[iz * nxe + ix]);
tempz[iz * nxe + ix] = expf(-sour.dt * pmlz[iz * nxe + ix]);
tempxh[iz * nxe + ix] = expf(-sour.dt * pmlxh[iz * nxe + ix]);
tempzh[iz * nxe + ix] = expf(-sour.dt * pmlzh[iz * nxe + ix]);
tempx5[iz * nxe + ix] = expf(-0.5f * sour.dt * pmlx[iz * nxe + ix]);
tempz5[iz * nxe + ix] = expf(-0.5f * sour.dt * pmlz[iz * nxe + ix]);
tempxh5[iz * nxe + ix] = expf(-0.5f * sour.dt * pmlxh[iz * nxe + ix]);
tempzh5[iz * nxe + ix] = expf(-0.5f * sour.dt * pmlzh[iz * nxe + ix]);
}
}
// Load PML temper matrices from host to device
float *d_tempx;
hipMalloc((void **) &d_tempx, nxe * nze * sizeof(float));
hipMemcpy(d_tempx, tempx, nxe * nze * sizeof(float), hipMemcpyHostToDevice);
float *d_tempz;
hipMalloc((void **) &d_tempz, nxe * nze * sizeof(float));
hipMemcpy(d_tempz, tempz, nxe * nze * sizeof(float), hipMemcpyHostToDevice);
float *d_tempxh;
hipMalloc((void **) &d_tempxh, nxe * nze * sizeof(float));
hipMemcpy(d_tempxh, tempxh, nxe * nze * sizeof(float), hipMemcpyHostToDevice);
float *d_tempzh;
hipMalloc((void **) &d_tempzh, nxe * nze * sizeof(float));
hipMemcpy(d_tempzh, tempzh, nxe * nze * sizeof(float), hipMemcpyHostToDevice);
float *d_tempx5;
hipMalloc((void **) &d_tempx5, nxe * nze * sizeof(float));
hipMemcpy(d_tempx5, tempx5, nxe * nze * sizeof(float), hipMemcpyHostToDevice);
float *d_tempz5;
hipMalloc((void **) &d_tempz5, nxe * nze * sizeof(float));
hipMemcpy(d_tempz5, tempz5, nxe * nze * sizeof(float), hipMemcpyHostToDevice);
float *d_tempxh5;
hipMalloc((void **) &d_tempxh5, nxe * nze * sizeof(float));
hipMemcpy(d_tempxh5, tempxh5, nxe * nze * sizeof(float), hipMemcpyHostToDevice);
float *d_tempzh5;
hipMalloc((void **) &d_tempzh5, nxe * nze * sizeof(float));
hipMemcpy(d_tempzh5, tempzh5, nxe * nze * sizeof(float), hipMemcpyHostToDevice);
// Free memory on host
free(pmlx); free(pmlz); free(pmlxh); free(pmlzh);
free(tempx); free(tempz); free(tempxh); free(tempzh);
free(tempx5); free(tempz5); free(tempxh5); free(tempzh5);
    // Define zero vector with length nxe x nze
float *zero = (float *)malloc(nxe * nze * sizeof(float));
for (int iz = 0; iz < nze; iz++)
for (int ix = 0; ix < nxe; ix++)
zero[iz * nxe + ix] = 0.0f;
// Define stress and strain vector on device and initialization
float *d_tau;
hipMalloc((void **) &d_tau, nxe * nze * sizeof(float));
hipMemcpy(d_tau, zero, nxe * nze * sizeof(float), hipMemcpyHostToDevice);
float *d_taux;
hipMalloc((void **) &d_taux, nxe * nze * sizeof(float));
hipMemcpy(d_taux, zero, nxe * nze * sizeof(float), hipMemcpyHostToDevice);
float *d_tauz;
hipMalloc((void **) &d_tauz, nxe * nze * sizeof(float));
hipMemcpy(d_tauz, zero, nxe * nze * sizeof(float), hipMemcpyHostToDevice);
float *d_velx;
hipMalloc((void **) &d_velx, nxe * nze * sizeof(float));
hipMemcpy(d_velx, zero, nxe * nze * sizeof(float), hipMemcpyHostToDevice);
float *d_velz;
hipMalloc((void **) &d_velz, nxe * nze * sizeof(float));
hipMemcpy(d_velz, zero, nxe * nze * sizeof(float), hipMemcpyHostToDevice);
// Define wavefield differentiation on device
float *dtau_x;
hipMalloc((void **) &dtau_x, nxe * nze * sizeof(float));
hipMemcpy(dtau_x, zero, nxe * nze * sizeof(float), hipMemcpyHostToDevice);
float *dtau_z;
hipMalloc((void **) &dtau_z, nxe * nze * sizeof(float));
    hipMemcpy(dtau_z, zero, nxe * nze * sizeof(float), hipMemcpyHostToDevice);
float *dvel_x;
hipMalloc((void **) &dvel_x, nxe * nze * sizeof(float));
hipMemcpy(dvel_x, zero, nxe * nze * sizeof(float), hipMemcpyHostToDevice);
float *dvel_z;
hipMalloc((void **) &dvel_z, nxe * nze * sizeof(float));
hipMemcpy(dvel_z, zero, nxe * nze * sizeof(float), hipMemcpyHostToDevice);
// Define pressure wavefield on host
float *tau = (float *)malloc(nxe * nze * sizeof(float));
    // Define source matrix on host and initialize it to zero
float *h_src = (float *)malloc(nxe * nze * sizeof(float));
for (int iz = 0; iz < nze; iz++)
for (int ix = 0; ix < nxe; ix++)
h_src[iz * nxe + ix] = 0.0f;
// Define source matrix on device
float *d_src;
hipMalloc((void **) &d_src, nxe * nze * sizeof(float));
// Total time step for wavefield to propagate
int tot = NINT(t / sour.dt);
// Time point to store the wavefield snapshot
int tp = NINT(tpoint / sour.dt) - 1;
// Source location
int isx = NINT(sour.sx / model.dx) + abc[0];
int isz = NINT(sour.sz / model.dz) + abc[1];
// dtdx and dtdz
float dtdx = sour.dt / model.dx;
float dtdz = sour.dt / model.dz;
// time loop for wavefield propagating
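    // Each step: forward stencils on velx/velz update the split stresses
    // taux/tauz (with PML damping), the source is added to form tau, then
    // backward stencils on tau update velx/velz.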
for (int it = 0; it < tot; it++)
{
float sampl;
if ( it < sour.ns)
sampl = sour.src[it];
else sampl = 0.0f;
// Build source matrix
h_src[isz * nxe + isx] = sampl;
hipMemcpy(d_src, h_src, nxe * nze * sizeof(float), hipMemcpyHostToDevice);
dim3 dimBlock(BLOCK_DIMX, BLOCK_DIMY);
dim3 dimGrid(nxe / BLOCK_DIMX, nze / BLOCK_DIMY);
hipLaunchKernelGGL(( fwd_2dhf_stg_orderN), dim3(dimGrid), dim3(dimBlock), 0, 0, d_velx, dvel_x, d_kappa, nxe, nze);
hipLaunchKernelGGL(( bd_fwd_2dhf_stg_orderN), dim3(dimGrid), dim3(dimBlock), 0, 0, d_velx, dvel_x, d_kappa, nxe, nze);
hipLaunchKernelGGL(( fwd_2dvf_stg_orderN), dim3(dimGrid), dim3(dimBlock), 0, 0, d_velz, dvel_z, d_kappa, nxe, nze);
hipLaunchKernelGGL(( bd_fwd_2dvf_stg_orderN), dim3(dimGrid), dim3(dimBlock), 0, 0, d_velz, dvel_z, d_kappa, nxe, nze);
hipLaunchKernelGGL(( MatMulAdd_PerElem), dim3(dimGrid), dim3(dimBlock), 0, 0, nxe, nze, d_taux, d_tempx, d_tempx5, dvel_x, dtdx);
hipLaunchKernelGGL(( MatMulAdd_PerElem), dim3(dimGrid), dim3(dimBlock), 0, 0, nxe, nze, d_tauz, d_tempz, d_tempz5, dvel_z, dtdz);
// hipLaunchKernelGGL(( MatMulAdd_PerElem), dim3(dimGrid), dim3(dimBlock), 0, 0, nxe, nze, d_taux, d_tempx, d_tempx5, dvel_x, dtdx);
// hipLaunchKernelGGL(( MatMulAdd_PerElem), dim3(dimGrid), dim3(dimBlock), 0, 0, nxe, nze, d_tauz, d_tempz, d_tempz5, dvel_z, dtdz);
hipLaunchKernelGGL(( AddSource), dim3(dimGrid), dim3(dimBlock), 0, 0, nxe, nze, d_taux, d_tauz, d_src, d_tau);
hipLaunchKernelGGL(( fwd_2dhb_stg_orderN), dim3(dimGrid), dim3(dimBlock), 0, 0, d_tau, dtau_x, d_bux, nxe, nze);
hipLaunchKernelGGL(( bd_fwd_2dhb_stg_orderN), dim3(dimGrid), dim3(dimBlock), 0, 0, d_tau, dtau_x, d_bux, nxe, nze);
hipLaunchKernelGGL(( fwd_2dvb_stg_orderN), dim3(dimGrid), dim3(dimBlock), 0, 0, d_tau, dtau_z, d_buz, nxe, nze);
hipLaunchKernelGGL(( bd_fwd_2dvb_stg_orderN), dim3(dimGrid), dim3(dimBlock), 0, 0, d_tau, dtau_z, d_buz, nxe, nze);
hipLaunchKernelGGL(( MatMulAdd_PerElem), dim3(dimGrid), dim3(dimBlock), 0, 0, nxe, nze, d_velx, d_tempx, d_tempx5, dtau_x, dtdx);
hipLaunchKernelGGL(( MatMulAdd_PerElem), dim3(dimGrid), dim3(dimBlock), 0, 0, nxe, nze, d_velz, d_tempz, d_tempz5, dtau_z, dtdz);
// hipLaunchKernelGGL(( MatMulAdd_PerElem), dim3(dimGrid), dim3(dimBlock), 0, 0, nxe, nze, d_velx, d_tempxh, d_tempxh5, dtau_x, dtdx);
// hipLaunchKernelGGL(( MatMulAdd_PerElem), dim3(dimGrid), dim3(dimBlock), 0, 0, nxe, nze, d_velz, d_tempzh, d_tempzh5, dtau_z, dtdz);
    // Copy the pressure field to host, record the surface seismogram, and save the snapshot at time tp
hipMemcpy(tau, d_tau, nxe * nze * sizeof(float), hipMemcpyDeviceToHost);
for (int ix = abc[0]; ix < model.nx + abc[0]; ix++)
seis[it * model.nx + ix - abc[0]] = tau[abc[1] * nxe + ix];
if (it == tp)
{
for (int iz = abc[1]; iz < model.nz + abc[1]; iz++)
for (int ix = abc[0]; ix < model.nx + abc[0]; ix++)
snap[(iz-abc[1]) * model.nx + ix - abc[0]] = tau[iz * nxe + ix];
}
}
// Free memory on device
hipFree(dtau_x); hipFree(dtau_z);
hipFree(dvel_x); hipFree(dvel_z);
hipFree(d_taux); hipFree(d_tauz);
hipFree(d_velx); hipFree(d_velz);
hipFree(d_tau); hipFree(d_kappa);
hipFree(d_bux); hipFree(d_buz);
hipFree(d_src);
hipFree(d_tempx); hipFree(d_tempz);
hipFree(d_tempxh); hipFree(d_tempzh);
hipFree(d_tempx5); hipFree(d_tempz5);
hipFree(d_tempxh5); hipFree(d_tempzh5);
// Free memory on host
free(rde); free(rve);
free(tau); free(h_src);
}
#define row (512 - 40)
#define col (512 - 40)
int main(void)
{
// Differentiation coefficients
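    // (standard 8th-order staggered-grid finite-difference coefficients)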
float h_coeff[radius] = {1225.f/1024.0f, -245.f/3072.f, 49.f/5120.f, -5.f/7168.f};
hipMemcpyToSymbol(c_coeff, h_coeff, radius * sizeof(float));
if ( hipGetLastError() != hipSuccess )
{
printf("coefficient upload to GPU failed \n");
exit(-3);
}
// set time and boundary
float t = 3.5f;
float tp = 1.5f;
float R = 1.0e-6f;
int abc[4] = {20, 20, 20, 20};
// set model parameters
Model model;
model.nx = col;
model.nz = row;
model.dx = 10.0f;
model.dz = 10.0f;
model.rd = (float *)malloc(model.nx * model.nz * sizeof(float));
model.rv = (float *)malloc(model.nx * model.nz * sizeof(float));
int Lx = model.nx + abc[0] + abc[2];
int Lz = model.nz + abc[1] + abc[3];
for (int iz = 0; iz < model.nz; iz ++)
{
for (int ix = 0; ix < model.nx; ix++)
{
model.rd[iz * model.nx + ix] = 1500.0f;
if (iz > 3 * Lz /8 - abc[1] && ix > 3 * Lx / 8 -abc[0])
model.rv[iz * model.nx + ix] = 3500.0f;
else
model.rv[iz * model.nx + ix] = 1500.0f;
}
}
// Set source parameters
Source sour;
sour.ns = 512;
sour.sx = (3 * Lx / 8 - abc[0]) * model.dx;
sour.sz = (Lz / 4 - abc[1]) * model.dz;
sour.dt = 0.001;
sour.f0 = 15.0;
sour.iss = 2;
sour.src = (float *)malloc(sour.ns * sizeof(float));
wavelet(sour);
// Allocate memory for snapshot wavefield
float *snap = (float *)malloc(model.nx * model.nz * sizeof(float));
float *seis = (float *)malloc(model.nx * NINT(t / sour.dt) * sizeof(float));
clock_t start = clock();
forward(t, model, sour, abc, R, tp, snap, seis);
clock_t end = clock();
printf("Time for computation of size %d x %d is %es\n", model.nx, model.nz, (double)(end - start) / CLOCKS_PER_SEC);
FILE *fsnap = fopen("fsnap.dat", "w");
for (int ix = 0; ix < model.nx; ix++)
for (int iz = 0; iz < model.nz; iz++)
fprintf(fsnap, "%e\n", snap[iz * model.nx + ix]);
fclose(fsnap);
printf("nstep = %d\n", NINT(t / sour.dt));
FILE * fseis = fopen("fseis.dat","w");
for (int ix = 0; ix < model.nx; ix ++)
for (int it = 0; it < NINT(t / sour.dt); it ++)
fprintf(fseis, "%e\n", seis[it * model.nx + ix]);
fclose(fseis);
// Free memory
free(model.rd);
free(model.rv);
free(sour.src);
free(snap); free(seis);
return 0;
}
| 0d2d8073f2e251ca649396b069ca0cfc09acd721.cu | /********************************************************************
* cuda_2d_fdaco.cu
* This is an example of the CUDA program to calculate 2d acoustic
* wavefield using staggered-grid finite-difference like method with
* PML absorbing boundary condition.
*
* Scripted by: Long Guihua
* Initiated time: 2010/04/08
* Last modified: 2010/04/08
* Contact info: [email protected]
*
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_2d_fdaco.h"
#define BLOCK_DIMX 16 // tile (and threadblock) size in x
#define BLOCK_DIMY 16 // tile (and threadblock) size in y
#define radius 4 // length of difference coefficients
#define PI 3.1415926
__constant__ float c_coeff[radius];
__global__ void fwd_2dhb_stg_orderN(float *g_input, float *g_output, float *g_param, const int dimx,
const int dimy)
{
__shared__ float s_data[BLOCK_DIMY + 2 * radius][BLOCK_DIMX + 2 * radius];
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int tx = threadIdx.x + radius; // thread's x-index into corresponding shared memory tile
int ty = threadIdx.y + radius; // thread's y-index into corresponding shared memory tile
int in_idx = iy * dimx + ix; // index for reading input
// update the data in shared memory in halo part
if (ix < dimx && threadIdx.y < radius) // halo above/below
{
s_data[threadIdx.y][tx] = g_input[in_idx - radius * dimx];
s_data[threadIdx.y + BLOCK_DIMY + radius][tx] = g_input[in_idx + BLOCK_DIMY * dimx];
}
// if (iy > radius -1 && iy < dimy - radius && threadIdx.x < radius) // halo left/right
if (iy < dimy && threadIdx.x < radius) // halo left/right
{
s_data[ty][threadIdx.x] = g_input[in_idx - radius];
s_data[ty][threadIdx.x + BLOCK_DIMX + radius] = g_input[in_idx + BLOCK_DIMX];
}
// update the data in shared memory within BLOCKED part
s_data[ty][tx] = g_input[in_idx];
__syncthreads();
// compute the output value
float temp = 0.0f;
for (int ic = 0; ic < radius; ic++)
temp += c_coeff[ic]* (s_data[ty][tx + ic] - s_data[ty][tx - ic -1]);
g_output[in_idx] = temp * g_param[in_idx];
}
__global__ void fwd_2dhf_stg_orderN(float *g_input, float *g_output, float *g_param, const int dimx,
const int dimy)
{
__shared__ float s_data[BLOCK_DIMY + 2 * radius][BLOCK_DIMX + 2 * radius];
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int tx = threadIdx.x + radius; // thread's x-index into corresponding shared memory tile
int ty = threadIdx.y + radius; // thread's y-index into corresponding shared memory tile
int in_idx = iy * dimx + ix; // index for reading input
// update the data in shared memory in halo part
if (ix < dimx && threadIdx.y < radius) // halo above/below
{
s_data[threadIdx.y][tx] = g_input[in_idx - radius * dimx];
s_data[threadIdx.y + BLOCK_DIMY + radius][tx] = g_input[in_idx + BLOCK_DIMY * dimx];
}
// if (iy > radius -1 && iy < dimy - radius && threadIdx.x < radius) // halo left/right
if (iy < dimy && threadIdx.x < radius) // halo left/right
{
s_data[ty][threadIdx.x] = g_input[in_idx - radius];
s_data[ty][threadIdx.x + BLOCK_DIMX + radius] = g_input[in_idx + BLOCK_DIMX];
}
// update the data in shared memory within BLOCKED part
s_data[ty][tx] = g_input[in_idx];
__syncthreads();
// compute the output value
float temp = 0.0f;
for (int ic = 0; ic < radius; ic++)
temp += c_coeff[ic] * (s_data[ty][tx + ic + 1] - s_data[ty][tx - ic]);
g_output[in_idx] = temp * g_param[in_idx];
}
__global__ void fwd_2dvb_stg_orderN(float *g_input, float *g_output, float *g_param, const int dimx,
const int dimy)
{
__shared__ float s_data[BLOCK_DIMY + 2 * radius][BLOCK_DIMX + 2 * radius];
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int tx = threadIdx.x + radius; // thread's x-index into corresponding shared memory tile
int ty = threadIdx.y + radius; // thread's y-index into corresponding shared memory tile
int in_idx = iy * dimx + ix; // index for reading input
// update the data in shared memory in halo part
// if (ix > radius - 1 && ix < dimx - radius && threadIdx.y < radius) // halo above/below
if (ix < dimx && threadIdx.y < radius) // halo above/below
{
s_data[threadIdx.y][tx] = g_input[in_idx - radius * dimx];
s_data[threadIdx.y + BLOCK_DIMY + radius][tx] = g_input[in_idx + BLOCK_DIMY * dimx];
}
if (iy < dimy && threadIdx.x < radius) // halo left/right
{
s_data[ty][threadIdx.x] = g_input[in_idx - radius];
s_data[ty][threadIdx.x + BLOCK_DIMX + radius] = g_input[in_idx + BLOCK_DIMX];
}
// update the data in shared memory within BLOCKED part
s_data[ty][tx] = g_input[in_idx];
__syncthreads();
// compute the output value
float temp = 0.0f;
for (int ic = 0; ic < radius; ic++)
temp += c_coeff[ic] * (s_data[ty + ic][tx] - s_data[ty - ic - 1][tx]);
g_output[in_idx] = temp * g_param[in_idx];
}
__global__ void fwd_2dvf_stg_orderN(float *g_input, float *g_output, float *g_param, const int dimx,
const int dimy)
{
__shared__ float s_data[BLOCK_DIMY + 2 * radius][BLOCK_DIMX + 2 * radius];
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int tx = threadIdx.x + radius; // thread's x-index into corresponding shared memory tile
int ty = threadIdx.y + radius; // thread's y-index into corresponding shared memory tile
int in_idx = iy * dimx + ix; // index for reading input
// update the data in shared memory in halo part
// if (ix > radius - 1 && ix < dimx - radius && threadIdx.y < radius) // halo above/below
if (ix < dimx && threadIdx.y < radius) // halo above/below
{
s_data[threadIdx.y][tx] = g_input[in_idx - radius * dimx];
s_data[threadIdx.y + BLOCK_DIMY + radius][tx] = g_input[in_idx + BLOCK_DIMY * dimx];
}
if (iy < dimy && threadIdx.x < radius) // halo left/right
{
s_data[ty][threadIdx.x] = g_input[in_idx - radius];
s_data[ty][threadIdx.x + BLOCK_DIMX + radius] = g_input[in_idx + BLOCK_DIMX];
}
// update the data in shared memory within BLOCKED part
s_data[ty][tx] = g_input[in_idx];
__syncthreads();
// compute the output value
float temp = 0.0f;
for (int ic = 0; ic < radius; ic++)
temp += c_coeff[ic]* (s_data[ty + ic + 1][tx] - s_data[ty - ic][tx]);
g_output[in_idx] = temp * g_param[in_idx];
}
__global__ void bd_fwd_2dhb_stg_orderN(float *g_input, float *g_output, float *g_param, const int dimx,
const int dimy)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int in_idx = iy * dimx + ix; // index for reading input
if (ix == 0) // set value 0.0f to the first column
g_output[in_idx] = 0.0f;
if (ix < radius && ix > 0 && iy < dimy ) // left boundary and backward
g_output[in_idx] = (g_input[in_idx] - g_input[in_idx - 1]) * g_param[in_idx];
if (ix > dimx - radius - 1 && ix < dimx && iy < dimy ) // right boundary and backward
g_output[in_idx] = (g_input[in_idx] - g_input[in_idx - 1]) * g_param[in_idx];
}
__global__ void bd_fwd_2dhf_stg_orderN(float *g_input, float *g_output, float *g_param, const int dimx,
const int dimy)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int in_idx = iy * dimx + ix; // index for reading input
if (ix < radius && iy < dimy ) // left boundary and forward
g_output[in_idx] = (g_input[in_idx + 1] - g_input[in_idx]) * g_param[in_idx];
if (ix > dimx - radius -1 && ix < dimx - 1 && iy < dimy ) // right boundary and forward
g_output[in_idx] = (g_input[in_idx + 1] - g_input[in_idx]) * g_param[in_idx];
if (ix == dimx - 1)
g_output[in_idx] = 0.0f;
}
__global__ void bd_fwd_2dvb_stg_orderN(float *g_input, float *g_output, float *g_param, const int dimx,
const int dimy)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int in_idx = iy * dimx + ix; // index for reading input
    if (iy == 0) // set value 0.0f to the first row
g_output[in_idx] = 0.0f;
    if (iy < radius && iy > 0 && ix < dimx ) // upper boundary and backward
g_output[in_idx] = (g_input[in_idx] - g_input[in_idx - dimx]) * g_param[in_idx];
    if (iy > dimy - radius - 1 && iy < dimy && ix < dimx ) // lower boundary and backward
g_output[in_idx] = (g_input[in_idx] - g_input[in_idx - dimx]) * g_param[in_idx];
}
__global__ void bd_fwd_2dvf_stg_orderN(float *g_input, float *g_output, float *g_param, const int dimx,
const int dimy)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int in_idx = iy * dimx + ix; // index for reading input
    if (iy < radius && ix < dimx ) // upper boundary and forward
g_output[in_idx] = (g_input[in_idx + dimx] - g_input[in_idx]) * g_param[in_idx];
    if (iy > dimy - radius -1 && iy < dimy - 1 && ix < dimx ) // lower boundary and forward
g_output[in_idx] = (g_input[in_idx + dimx] - g_input[in_idx]) * g_param[in_idx];
if (iy == dimy - 1)
g_output[in_idx] = 0.0f;
}
__global__ void AddSource(int nxe, int nze, float *d_taux, float *d_tauz, float *d_src, float *d_tau)
{
__shared__ float s_dtaux[BLOCK_DIMY][BLOCK_DIMX];
__shared__ float s_dtauz[BLOCK_DIMY][BLOCK_DIMX];
__shared__ float s_dsrc[BLOCK_DIMY][BLOCK_DIMX];
int ix = blockDim.x * blockIdx.x + threadIdx.x;
int iy = blockDim.y * blockIdx.y + threadIdx.y;
int idx = iy * nxe + ix;
if (ix < nxe && iy < nze)
{
s_dtaux[threadIdx.y][threadIdx.x] = d_taux[idx];
s_dtauz[threadIdx.y][threadIdx.x] = d_tauz[idx];
s_dsrc[threadIdx.y][threadIdx.x] = d_src[idx];
}
__syncthreads();
d_tau[idx] = s_dtaux[threadIdx.y][threadIdx.x] + s_dtauz[threadIdx.y][threadIdx.x] + s_dsrc[threadIdx.y][threadIdx.x];
}
__global__ void MatMulAdd_PerElem(int nxe, int nze, float *c, float *a, float *abar, float *b, float alpha)
{
__shared__ float s_a[BLOCK_DIMY][BLOCK_DIMX];
__shared__ float s_abar[BLOCK_DIMY][BLOCK_DIMX];
__shared__ float s_b[BLOCK_DIMY][BLOCK_DIMX];
__shared__ float s_c[BLOCK_DIMY][BLOCK_DIMX];
int ix = blockDim.x * blockIdx.x + threadIdx.x;
int iy = blockDim.y * blockIdx.y + threadIdx.y;
int idx = iy * nxe + ix;
if (ix < nxe && iy < nze)
{
s_a[threadIdx.y][threadIdx.x] = a[idx];
s_abar[threadIdx.y][threadIdx.x] = abar[idx];
s_b[threadIdx.y][threadIdx.x] = b[idx];
s_c[threadIdx.y][threadIdx.x] = c[idx];
}
__syncthreads();
c[idx] = s_a[threadIdx.y][threadIdx.x] * s_c[threadIdx.y][threadIdx.x]
+ s_abar[threadIdx.y][threadIdx.x] * s_b[threadIdx.y][threadIdx.x] * alpha;
}
void submodext(int nx, int nz, int *abc, float *rd, float *rv, float *rde, float *rve)
{
int ix, iz, nxe, nze;
nxe = nx+abc[0]+abc[2];
nze = nz+abc[1]+abc[3];
/* model kernel */
for (iz=abc[1]; iz<nz+abc[1]; iz++)
for (ix=abc[0]; ix<nx+abc[0]; ix++) {
rde[iz*nxe+ix] = rd[(iz-abc[1])*nx+ix-abc[0]];
rve[iz*nxe+ix] = rv[(iz-abc[1])*nx+ix-abc[0]];
}
/* left- and right-sides */
for (iz=abc[1]; iz<nz+abc[1]; iz++) {
for (ix=0; ix<abc[0]; ix++) {
rde[iz*nxe+ix] = rd[(iz-abc[1])*nx];
rve[iz*nxe+ix] = rv[(iz-abc[1])*nx];
}
for (ix=nx+abc[0]; ix<nxe; ix++) {
rde[iz*nxe+ix] = rd[(iz-abc[1])*nx+nx-1];
rve[iz*nxe+ix] = rv[(iz-abc[1])*nx+nx-1];
}
}
/* upper- and lower- sides */
for (ix=abc[0]; ix<nx+abc[0]; ix++) {
for (iz=0; iz<abc[1]; iz++) {
rde[iz*nxe+ix] = rd[ix-abc[0]];
rve[iz*nxe+ix] = rv[ix-abc[0]];
}
for (iz=nz+abc[1]; iz<nze; iz++) {
rde[iz*nxe+ix] = rd[(nz-1)*nx+ix-abc[0]];
rve[iz*nxe+ix] = rv[(nz-1)*nx+ix-abc[0]];
}
}
/* upper-left corner */
for (iz=0; iz<abc[1]; iz++)
for (ix=0; ix<abc[0]; ix++) {
rde[iz*nxe+ix] = rd[0];
rve[iz*nxe+ix] = rv[0];
}
/* upper-right corner */
for (iz=0; iz<abc[1]; iz++)
for (ix=nx+abc[0]; ix<nxe; ix++) {
rde[iz*nxe+ix] = rd[nx-1];
rve[iz*nxe+ix] = rv[nx-1];
}
/* lower-left corner */
for (iz=nz+abc[1]; iz<nze; iz++)
for (ix=0; ix<abc[0]; ix++) {
rde[iz*nxe+ix] = rd[(nz-1)*nx];
rve[iz*nxe+ix] = rv[(nz-1)*nx];
}
/* lower-right corner */
for (iz=nz+abc[1]; iz<nze; iz++)
for (ix=nx+abc[0]; ix<nxe; ix++) {
rde[iz*nxe+ix] = rd[(nz-1)*nx+nx-1];
rve[iz*nxe+ix] = rv[(nz-1)*nx+nx-1];
}
return;
}
void subpml(int nx, int nz, float dx, float dz, float R, int *nmpl, float *ve, float *qx1, float *qz1, float *qx2, float *qz2)
{
int i, j, nxe, nze;
float tmp, idx;
nxe = nx+nmpl[0]+nmpl[2];
nze = nz+nmpl[1]+nmpl[3];
for (j=0; j<nz; j++)
for (i=0; i<nx; i++) {
qx1[(j+nmpl[1])*nxe+i+nmpl[0]] = 0.0;
qx2[(j+nmpl[1])*nxe+i+nmpl[0]] = 0.0;
qz1[(j+nmpl[1])*nxe+i+nmpl[0]] = 0.0;
qz2[(j+nmpl[1])*nxe+i+nmpl[0]] = 0.0;
}
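    /* Quadratic PML damping profile: q = 3*v*ln(1/R)*(d/L)^2 / (2*L), where d is
       the distance into the absorbing layer, L its thickness, and R the target
       reflection coefficient; qx2/qz2 hold the half-grid-shifted profiles. */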
tmp = (float)nmpl[0]*dx;
for (j=0; j<nze; j++) /* left boundary */
for (i=0; i<nmpl[0]; i++) {
idx = (float)(nmpl[0]-i);
qx1[j*nxe+i] = 3.0f*ve[j*nxe+i]*logf(1.0f/R)*(idx*dx/tmp)*(idx*dx/tmp)/(2.0f*tmp);
qx2[j*nxe+i] = 3.0f*ve[j*nxe+i]*logf(1.0f/R)*((idx+0.5f)*dx/tmp)*((idx+0.5f)*dx/tmp)/(2.0f*tmp);
}
tmp = (float)nmpl[2]*dx;
for (j=0; j<nze; j++) /* right boundary */
for (i=nx+nmpl[0]; i<nxe; i++) {
idx = (float)(i+nmpl[2]+1-nxe);
qx1[j*nxe+i] = 3.0f*ve[j*nxe+i]*logf(1.0f/R)*(idx*dx/tmp)*(idx*dx/tmp)/(2.0f*tmp);
qx2[j*nxe+i] = 3.0f*ve[j*nxe+i]*logf(1.0f/R)*((idx+0.5f)*dx/tmp)*((idx+0.5f)*dx/tmp)/(2.0f*tmp);
}
tmp = (float)nmpl[1]*dz;
for (i=0; i<nxe; i++) /* upper boundary */
for (j=0; j<nmpl[1]; j++) {
idx = (float)(nmpl[1]-j);
qz1[j*nxe+i] = 3.0f*ve[j*nxe+i]*logf(1.0f/R)*(idx*dz/tmp)*(idx*dz/tmp)/(2.0f*tmp);
qz2[j*nxe+i] = 3.0f*ve[j*nxe+i]*logf(1.0f/R)*((idx+0.5f)*dz/tmp)*((idx+0.5f)*dz/tmp)/(2.0f*tmp);
}
tmp = (float)nmpl[3]*dz;
for (i=0; i<nxe; i++) /* lower boundary */
for (j=nz+nmpl[1]; j<nze; j++) {
idx = (float)(j+nmpl[3]+1-nze);
qz1[j*nxe+i] = 3.0f*ve[j*nxe+i]*logf(1.0f/R)*(idx*dz/tmp)*(idx*dz/tmp)/(2.0f*tmp);
qz2[j*nxe+i] = 3.0f*ve[j*nxe+i]*logf(1.0f/R)*((idx+0.5f)*dz/tmp)*((idx+0.5f)*dz/tmp)/(2.0f*tmp);
}
return;
}
void substager(int nxe, int nze, float *rde, float *rve, float *bux, float *buz, float *kappa)
{
int i, j;
for (j=1; j<nze-1; j++)
for (i=1; i<nxe-1; i++) {
bux[j*nxe+i] = 2.0f/(rde[j*nxe+i]+rde[j*nxe+i+1]);
buz[j*nxe+i] = 2.0f/(rde[j*nxe+i]+rde[(j+1)*nxe+i]);
kappa[j*nxe+i] = rde[j*nxe+i]*rve[j*nxe+i]*rve[j*nxe+i];
}
for (j=0; j<nze; j++) {
bux[j*nxe] = 1.0f/rde[j*nxe];
buz[j*nxe] = 1.0f/rde[j*nxe];
bux[j*nxe+nxe-1] = 1.0f/rde[j*nxe+nxe-1];
buz[j*nxe+nxe-1] = 1.0f/rde[j*nxe+nxe-1];
kappa[j*nxe] = rde[j*nxe]*rve[j*nxe]*rve[j*nxe];
kappa[j*nxe+nxe-1] = rde[j*nxe+nxe-1]*rve[j*nxe+nxe-1]*rve[j*nxe+nxe-1];
}
for (i=0; i<nxe; i++) {
bux[i] = 1.0f/rde[i];
buz[i] = 1.0f/rde[i];
bux[(nze-1)*nxe+i] = 1.0f/rde[(nze-1)*nxe+i];
buz[(nze-1)*nxe+i] = 1.0f/rde[(nze-1)*nxe+i];
kappa[i] = rde[i]*rve[i]*rve[i];
kappa[(nze-1)*nxe+i] = rde[(nze-1)*nxe+i]*rve[(nze-1)*nxe+i]*rve[(nze-1)*nxe+i];
}
return;
}
void wavelet(Source sour)
{
float t0 = 1.5f*sqrtf(6.0f)/((float)PI*sour.f0);
float t, da, da2;
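    /* iss selects the source time function: 1 = monochromatic cosine,
       2 = Ricker wavelet, 3 = Gaussian-derivative pulse, otherwise the
       time derivative of the Ricker wavelet. */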
for (int i=0; i<sour.ns; i++)
{
t = (float)i*sour.dt;
da = (float)PI*sour.f0*(t-t0);
da2 = da*da;
if (sour.iss == 1) sour.src[i] = cosf(2.0f*(float)PI*sour.f0*t);
else if (sour.iss == 2) sour.src[i] = (1.0f-2.0f*da2)*expf(-da2);
else if (sour.iss == 3) sour.src[i] = (t-t0)*expf(-da2);
else sour.src[i] = -4.0f*da*(float)PI*sour.f0*expf(-da2)
-2.0f*da*(float)PI*sour.f0*(1.0f-2.0f*da2)*expf(-da2);
}
return;
}
void forward(float t, Model model, Source sour, int *abc, float R, float tpoint, float *snap, float *seis)
{
// Extend model grids
int nxe = model.nx + abc[0] + abc[2];
int nze = model.nz + abc[1] + abc[3];
// Allocate memory for model parameters
float *rde = (float *)malloc(nxe * nze * sizeof(float));
float *rve = (float *)malloc(nxe * nze * sizeof(float));
float *kappa = (float *)malloc(nxe * nze * sizeof(float));
float *bux = (float *)malloc(nxe * nze * sizeof(float));
float *buz = (float *)malloc(nxe * nze * sizeof(float));
// Extend model grids
submodext(model.nx, model.nz, abc, model.rd, model.rv, rde, rve);
// Interpolate model parameters in-between grids
substager(nxe, nze, rde, rve, bux, buz, kappa);
// Load model parameters from host to device
float *d_bux;
cudaMalloc((void **) &d_bux, nxe * nze * sizeof(float));
cudaMemcpy(d_bux, bux, nxe * nze * sizeof(float), cudaMemcpyHostToDevice);
float *d_buz;
cudaMalloc((void **) &d_buz, nxe * nze * sizeof(float));
cudaMemcpy(d_buz, buz, nxe * nze * sizeof(float), cudaMemcpyHostToDevice);
float *d_kappa;
cudaMalloc((void **) &d_kappa, nxe * nze * sizeof(float));
cudaMemcpy(d_kappa, kappa, nxe * nze * sizeof(float), cudaMemcpyHostToDevice);
// Free memory on host
free(bux); free(buz); free(kappa);
// Allocate memory for PML boundary matrices
float *pmlx = (float *)malloc(nxe * nze * sizeof(float));
float *pmlz = (float *)malloc(nxe * nze * sizeof(float));
float *pmlxh = (float *)malloc(nxe * nze * sizeof(float));
float *pmlzh = (float *)malloc(nxe * nze * sizeof(float));
// Generate PML boundary governing matrices
subpml(model.nx, model.nz, model.dx, model.dz, R, abc, rve, pmlx, pmlz, pmlxh, pmlzh);
// Define and generate temper matrices of PML boundary
float *tempx = (float *)malloc(nxe * nze * sizeof(float));
float *tempz = (float *)malloc(nxe * nze * sizeof(float));
float *tempxh = (float *)malloc(nxe * nze * sizeof(float));
float *tempzh = (float *)malloc(nxe * nze * sizeof(float));
float *tempx5 = (float *)malloc(nxe * nze * sizeof(float));
float *tempz5 = (float *)malloc(nxe * nze * sizeof(float));
float *tempxh5 = (float *)malloc(nxe * nze * sizeof(float));
float *tempzh5 = (float *)malloc(nxe * nze * sizeof(float));
for (int iz = 0; iz < nze; iz++)
{
for (int ix = 0; ix < nxe; ix++)
{
tempx[iz * nxe + ix] = expf(-sour.dt * pmlx[iz * nxe + ix]);
tempz[iz * nxe + ix] = expf(-sour.dt * pmlz[iz * nxe + ix]);
tempxh[iz * nxe + ix] = expf(-sour.dt * pmlxh[iz * nxe + ix]);
tempzh[iz * nxe + ix] = expf(-sour.dt * pmlzh[iz * nxe + ix]);
tempx5[iz * nxe + ix] = expf(-0.5f * sour.dt * pmlx[iz * nxe + ix]);
tempz5[iz * nxe + ix] = expf(-0.5f * sour.dt * pmlz[iz * nxe + ix]);
tempxh5[iz * nxe + ix] = expf(-0.5f * sour.dt * pmlxh[iz * nxe + ix]);
tempzh5[iz * nxe + ix] = expf(-0.5f * sour.dt * pmlzh[iz * nxe + ix]);
}
}
// Load PML temper matrices from host to device
float *d_tempx;
cudaMalloc((void **) &d_tempx, nxe * nze * sizeof(float));
cudaMemcpy(d_tempx, tempx, nxe * nze * sizeof(float), cudaMemcpyHostToDevice);
float *d_tempz;
cudaMalloc((void **) &d_tempz, nxe * nze * sizeof(float));
cudaMemcpy(d_tempz, tempz, nxe * nze * sizeof(float), cudaMemcpyHostToDevice);
float *d_tempxh;
cudaMalloc((void **) &d_tempxh, nxe * nze * sizeof(float));
cudaMemcpy(d_tempxh, tempxh, nxe * nze * sizeof(float), cudaMemcpyHostToDevice);
float *d_tempzh;
cudaMalloc((void **) &d_tempzh, nxe * nze * sizeof(float));
cudaMemcpy(d_tempzh, tempzh, nxe * nze * sizeof(float), cudaMemcpyHostToDevice);
float *d_tempx5;
cudaMalloc((void **) &d_tempx5, nxe * nze * sizeof(float));
cudaMemcpy(d_tempx5, tempx5, nxe * nze * sizeof(float), cudaMemcpyHostToDevice);
float *d_tempz5;
cudaMalloc((void **) &d_tempz5, nxe * nze * sizeof(float));
cudaMemcpy(d_tempz5, tempz5, nxe * nze * sizeof(float), cudaMemcpyHostToDevice);
float *d_tempxh5;
cudaMalloc((void **) &d_tempxh5, nxe * nze * sizeof(float));
cudaMemcpy(d_tempxh5, tempxh5, nxe * nze * sizeof(float), cudaMemcpyHostToDevice);
float *d_tempzh5;
cudaMalloc((void **) &d_tempzh5, nxe * nze * sizeof(float));
cudaMemcpy(d_tempzh5, tempzh5, nxe * nze * sizeof(float), cudaMemcpyHostToDevice);
// Free memory on host
free(pmlx); free(pmlz); free(pmlxh); free(pmlzh);
free(tempx); free(tempz); free(tempxh); free(tempzh);
free(tempx5); free(tempz5); free(tempxh5); free(tempzh5);
    // Define zero vector with length nxe x nze
float *zero = (float *)malloc(nxe * nze * sizeof(float));
for (int iz = 0; iz < nze; iz++)
for (int ix = 0; ix < nxe; ix++)
zero[iz * nxe + ix] = 0.0f;
// Define stress and strain vector on device and initialization
float *d_tau;
cudaMalloc((void **) &d_tau, nxe * nze * sizeof(float));
cudaMemcpy(d_tau, zero, nxe * nze * sizeof(float), cudaMemcpyHostToDevice);
float *d_taux;
cudaMalloc((void **) &d_taux, nxe * nze * sizeof(float));
cudaMemcpy(d_taux, zero, nxe * nze * sizeof(float), cudaMemcpyHostToDevice);
float *d_tauz;
cudaMalloc((void **) &d_tauz, nxe * nze * sizeof(float));
cudaMemcpy(d_tauz, zero, nxe * nze * sizeof(float), cudaMemcpyHostToDevice);
float *d_velx;
cudaMalloc((void **) &d_velx, nxe * nze * sizeof(float));
cudaMemcpy(d_velx, zero, nxe * nze * sizeof(float), cudaMemcpyHostToDevice);
float *d_velz;
cudaMalloc((void **) &d_velz, nxe * nze * sizeof(float));
cudaMemcpy(d_velz, zero, nxe * nze * sizeof(float), cudaMemcpyHostToDevice);
// Define wavefield differentiation on device
float *dtau_x;
cudaMalloc((void **) &dtau_x, nxe * nze * sizeof(float));
cudaMemcpy(dtau_x, zero, nxe * nze * sizeof(float), cudaMemcpyHostToDevice);
float *dtau_z;
cudaMalloc((void **) &dtau_z, nxe * nze * sizeof(float));
    cudaMemcpy(dtau_z, zero, nxe * nze * sizeof(float), cudaMemcpyHostToDevice);
float *dvel_x;
cudaMalloc((void **) &dvel_x, nxe * nze * sizeof(float));
cudaMemcpy(dvel_x, zero, nxe * nze * sizeof(float), cudaMemcpyHostToDevice);
float *dvel_z;
cudaMalloc((void **) &dvel_z, nxe * nze * sizeof(float));
cudaMemcpy(dvel_z, zero, nxe * nze * sizeof(float), cudaMemcpyHostToDevice);
// Define pressure wavefield on host
float *tau = (float *)malloc(nxe * nze * sizeof(float));
    // Define source matrix on host and initialize it to zero
float *h_src = (float *)malloc(nxe * nze * sizeof(float));
for (int iz = 0; iz < nze; iz++)
for (int ix = 0; ix < nxe; ix++)
h_src[iz * nxe + ix] = 0.0f;
// Define source matrix on device
float *d_src;
cudaMalloc((void **) &d_src, nxe * nze * sizeof(float));
// Total time step for wavefield to propagate
int tot = NINT(t / sour.dt);
// Time point to store the wavefield snapshot
int tp = NINT(tpoint / sour.dt) - 1;
// Source location
int isx = NINT(sour.sx / model.dx) + abc[0];
int isz = NINT(sour.sz / model.dz) + abc[1];
// dtdx and dtdz
float dtdx = sour.dt / model.dx;
float dtdz = sour.dt / model.dz;
// time loop for wavefield propagating
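    // Each step: forward stencils on velx/velz update the split stresses
    // taux/tauz (with PML damping), the source is added to form tau, then
    // backward stencils on tau update velx/velz.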
for (int it = 0; it < tot; it++)
{
float sampl;
if ( it < sour.ns)
sampl = sour.src[it];
else sampl = 0.0f;
// Build source matrix
h_src[isz * nxe + isx] = sampl;
cudaMemcpy(d_src, h_src, nxe * nze * sizeof(float), cudaMemcpyHostToDevice);
dim3 dimBlock(BLOCK_DIMX, BLOCK_DIMY);
dim3 dimGrid(nxe / BLOCK_DIMX, nze / BLOCK_DIMY);
fwd_2dhf_stg_orderN<<<dimGrid, dimBlock>>>(d_velx, dvel_x, d_kappa, nxe, nze);
bd_fwd_2dhf_stg_orderN<<<dimGrid, dimBlock>>>(d_velx, dvel_x, d_kappa, nxe, nze);
fwd_2dvf_stg_orderN<<<dimGrid, dimBlock>>>(d_velz, dvel_z, d_kappa, nxe, nze);
bd_fwd_2dvf_stg_orderN<<<dimGrid, dimBlock>>>(d_velz, dvel_z, d_kappa, nxe, nze);
MatMulAdd_PerElem<<<dimGrid, dimBlock>>>(nxe, nze, d_taux, d_tempx, d_tempx5, dvel_x, dtdx);
MatMulAdd_PerElem<<<dimGrid, dimBlock>>>(nxe, nze, d_tauz, d_tempz, d_tempz5, dvel_z, dtdz);
// MatMulAdd_PerElem<<<dimGrid, dimBlock>>>(nxe, nze, d_taux, d_tempx, d_tempx5, dvel_x, dtdx);
// MatMulAdd_PerElem<<<dimGrid, dimBlock>>>(nxe, nze, d_tauz, d_tempz, d_tempz5, dvel_z, dtdz);
AddSource<<<dimGrid, dimBlock>>>(nxe, nze, d_taux, d_tauz, d_src, d_tau);
fwd_2dhb_stg_orderN<<<dimGrid, dimBlock>>>(d_tau, dtau_x, d_bux, nxe, nze);
bd_fwd_2dhb_stg_orderN<<<dimGrid, dimBlock>>>(d_tau, dtau_x, d_bux, nxe, nze);
fwd_2dvb_stg_orderN<<<dimGrid, dimBlock>>>(d_tau, dtau_z, d_buz, nxe, nze);
bd_fwd_2dvb_stg_orderN<<<dimGrid, dimBlock>>>(d_tau, dtau_z, d_buz, nxe, nze);
MatMulAdd_PerElem<<<dimGrid, dimBlock>>>(nxe, nze, d_velx, d_tempx, d_tempx5, dtau_x, dtdx);
MatMulAdd_PerElem<<<dimGrid, dimBlock>>>(nxe, nze, d_velz, d_tempz, d_tempz5, dtau_z, dtdz);
// MatMulAdd_PerElem<<<dimGrid, dimBlock>>>(nxe, nze, d_velx, d_tempxh, d_tempxh5, dtau_x, dtdx);
// MatMulAdd_PerElem<<<dimGrid, dimBlock>>>(nxe, nze, d_velz, d_tempzh, d_tempzh5, dtau_z, dtdz);
// Copy wavefield to host, record the surface seismogram, and save a snapshot at time tp
cudaMemcpy(tau, d_tau, nxe * nze * sizeof(float), cudaMemcpyDeviceToHost);
for (int ix = abc[0]; ix < model.nx + abc[0]; ix++)
seis[it * model.nx + ix - abc[0]] = tau[abc[1] * nxe + ix];
if (it == tp)
{
for (int iz = abc[1]; iz < model.nz + abc[1]; iz++)
for (int ix = abc[0]; ix < model.nx + abc[0]; ix++)
snap[(iz-abc[1]) * model.nx + ix - abc[0]] = tau[iz * nxe + ix];
}
}
// Free memory on device
cudaFree(dtau_x); cudaFree(dtau_z);
cudaFree(dvel_x); cudaFree(dvel_z);
cudaFree(d_taux); cudaFree(d_tauz);
cudaFree(d_velx); cudaFree(d_velz);
cudaFree(d_tau); cudaFree(d_kappa);
cudaFree(d_bux); cudaFree(d_buz);
cudaFree(d_src);
cudaFree(d_tempx); cudaFree(d_tempz);
cudaFree(d_tempxh); cudaFree(d_tempzh);
cudaFree(d_tempx5); cudaFree(d_tempz5);
cudaFree(d_tempxh5); cudaFree(d_tempzh5);
// Free memory on host
free(rde); free(rve);
free(tau); free(h_src);
}
#define row (512 - 40)
#define col (512 - 40)
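// Interior model size; with 20 absorbing cells on each side the extended grid is 512 x 512.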
int main(void)
{
// Differentiation coefficients
float h_coeff[radius] = {1225.f/1024.0f, -245.f/3072.f, 49.f/5120.f, -5.f/7168.f};
cudaMemcpyToSymbol(c_coeff, h_coeff, radius * sizeof(float));
if ( cudaGetLastError() != cudaSuccess )
{
printf("coefficient upload to GPU failed \n");
exit(-3);
}
// set time and boundary
float t = 3.5f;
float tp = 1.5f;
float R = 1.0e-6f;
int abc[4] = {20, 20, 20, 20};
// set model parameters
Model model;
model.nx = col;
model.nz = row;
model.dx = 10.0f;
model.dz = 10.0f;
model.rd = (float *)malloc(model.nx * model.nz * sizeof(float));
model.rv = (float *)malloc(model.nx * model.nz * sizeof(float));
int Lx = model.nx + abc[0] + abc[2];
int Lz = model.nz + abc[1] + abc[3];
for (int iz = 0; iz < model.nz; iz ++)
{
for (int ix = 0; ix < model.nx; ix++)
{
model.rd[iz * model.nx + ix] = 1500.0f;
if (iz > 3 * Lz / 8 - abc[1] && ix > 3 * Lx / 8 - abc[0])
model.rv[iz * model.nx + ix] = 3500.0f;
else
model.rv[iz * model.nx + ix] = 1500.0f;
}
}
// Set source parameters
Source sour;
sour.ns = 512;
sour.sx = (3 * Lx / 8 - abc[0]) * model.dx;
sour.sz = (Lz / 4 - abc[1]) * model.dz;
sour.dt = 0.001;
sour.f0 = 15.0;
sour.iss = 2;
sour.src = (float *)malloc(sour.ns * sizeof(float));
wavelet(sour);
// Allocate memory for snapshot wavefield
float *snap = (float *)malloc(model.nx * model.nz * sizeof(float));
float *seis = (float *)malloc(model.nx * NINT(t / sour.dt) * sizeof(float));
clock_t start = clock();
forward(t, model, sour, abc, R, tp, snap, seis);
clock_t end = clock();
printf("Time for computation of size %d x %d is %es\n", model.nx, model.nz, (double)(end - start) / CLOCKS_PER_SEC);
FILE *fsnap = fopen("fsnap.dat", "w");
for (int ix = 0; ix < model.nx; ix++)
for (int iz = 0; iz < model.nz; iz++)
fprintf(fsnap, "%e\n", snap[iz * model.nx + ix]);
fclose(fsnap);
printf("nstep = %d\n", NINT(t / sour.dt));
FILE * fseis = fopen("fseis.dat","w");
for (int ix = 0; ix < model.nx; ix ++)
for (int it = 0; it < NINT(t / sour.dt); it ++)
fprintf(fseis, "%e\n", seis[it * model.nx + ix]);
fclose(fseis);
// Free memory
free(model.rd);
free(model.rv);
free(sour.src);
free(snap); free(seis);
return 0;
}
|
5d1d229add82b776ab834dc46ced71db38b5b73e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../common/book.h"
#include "../common/cpu_bitmap.h"
#define rnd(x) (x*rand()/RAND_MAX)
#define SPHERES 20
#define DIM 10240
#define INF 2e10f
struct Sphere{
float r, g, b;
float radius;
float x, y, z;
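// Intersect a +z-directed ray through pixel (ox, oy) with this sphere:
// returns the z of the hit point (or -INF on a miss) and writes the
// normalized depth term used for shading into *n.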
__device__ float hit(float ox, float oy, float *n){
float dx = ox - x;
float dy = oy - y;
if(dx*dx + dy*dy < radius*radius){
float dz = sqrtf(radius*radius - dx*dx - dy*dy);
*n = dz/sqrtf(radius*radius);
return dz + z;
}
return -INF;
}
};
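// One thread per pixel: test every sphere and keep the closest hit (largest z).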
__global__ void kernel(Sphere *s, unsigned char *ptr){
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int offset = x + y*blockDim.x*gridDim.x;
float ox = (x - DIM/2);
float oy = (y - DIM/2);
float r=0, g=0, b=0;
float maxz = -INF;
for(int i=0; i<SPHERES; i++){
float n,t = s[i].hit(ox, oy, &n);
if(t > maxz){
float fscale = n;
r = s[i].r*fscale;
g = s[i].g*fscale;
b = s[i].b*fscale;
maxz = t;
}
}
ptr[offset*4 + 0] = (int) (r*255);
ptr[offset*4 + 1] = (int) (g*255);
ptr[offset*4 + 2] = (int) (b*255);
ptr[offset*4 + 3] = 255;
}
int main(void){
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
CPUBitmap bitmap(DIM, DIM);
unsigned char *dev_bitmap;
Sphere *s;
hipMalloc((void **)&dev_bitmap, bitmap.image_size());
hipMalloc((void **)&s, sizeof(Sphere)*SPHERES);
Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere)*SPHERES);
for(int i = 0; i < SPHERES; i++){
temp_s[i].r = rnd(1.0f);
temp_s[i].g = rnd(1.0f);
temp_s[i].b = rnd(1.0f);
temp_s[i].x = rnd(1000.0f) - 500;
temp_s[i].y = rnd(1000.0f) - 500;
temp_s[i].z = rnd(1000.0f) - 500;
temp_s[i].radius = rnd(100.0f) + 20;
}
hipMemcpy(s, temp_s, sizeof(Sphere)*SPHERES, hipMemcpyHostToDevice);
free(temp_s);
dim3 grids(DIM/16, DIM/16);
dim3 threads(16, 16);
hipLaunchKernelGGL(( kernel), dim3(grids), dim3(threads), 0, 0, s, dev_bitmap);
hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float dt;
hipEventElapsedTime(&dt, start, stop);
printf("Time taken: %3.1f ms\n", dt);
bitmap.display_and_exit();
hipFree(dev_bitmap);
hipFree(s);
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
}
| 5d1d229add82b776ab834dc46ced71db38b5b73e.cu | #include "../common/book.h"
#include "../common/cpu_bitmap.h"
#define rnd(x) (x*rand()/RAND_MAX)
#define SPHERES 20
#define DIM 10240
#define INF 2e10f
struct Sphere{
float r, g, b;
float radius;
float x, y, z;
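// Intersect a +z-directed ray through pixel (ox, oy) with this sphere:
// returns the z of the hit point (or -INF on a miss) and writes the
// normalized depth term used for shading into *n.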
__device__ float hit(float ox, float oy, float *n){
float dx = ox - x;
float dy = oy - y;
if(dx*dx + dy*dy < radius*radius){
float dz = sqrtf(radius*radius - dx*dx - dy*dy);
*n = dz/sqrtf(radius*radius);
return dz + z;
}
return -INF;
}
};
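// One thread per pixel: test every sphere and keep the closest hit (largest z).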
__global__ void kernel(Sphere *s, unsigned char *ptr){
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int offset = x + y*blockDim.x*gridDim.x;
float ox = (x - DIM/2);
float oy = (y - DIM/2);
float r=0, g=0, b=0;
float maxz = -INF;
for(int i=0; i<SPHERES; i++){
float n,t = s[i].hit(ox, oy, &n);
if(t > maxz){
float fscale = n;
r = s[i].r*fscale;
g = s[i].g*fscale;
b = s[i].b*fscale;
maxz = t;
}
}
ptr[offset*4 + 0] = (int) (r*255);
ptr[offset*4 + 1] = (int) (g*255);
ptr[offset*4 + 2] = (int) (b*255);
ptr[offset*4 + 3] = 255;
}
int main(void){
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
CPUBitmap bitmap(DIM, DIM);
unsigned char *dev_bitmap;
Sphere *s;
cudaMalloc((void **)&dev_bitmap, bitmap.image_size());
cudaMalloc((void **)&s, sizeof(Sphere)*SPHERES);
Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere)*SPHERES);
for(int i = 0; i < SPHERES; i++){
temp_s[i].r = rnd(1.0f);
temp_s[i].g = rnd(1.0f);
temp_s[i].b = rnd(1.0f);
temp_s[i].x = rnd(1000.0f) - 500;
temp_s[i].y = rnd(1000.0f) - 500;
temp_s[i].z = rnd(1000.0f) - 500;
temp_s[i].radius = rnd(100.0f) + 20;
}
cudaMemcpy(s, temp_s, sizeof(Sphere)*SPHERES, cudaMemcpyHostToDevice);
free(temp_s);
dim3 grids(DIM/16, DIM/16);
dim3 threads(16, 16);
kernel<<<grids, threads>>>(s, dev_bitmap);
cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float dt;
cudaEventElapsedTime(&dt, start, stop);
printf("Time taken: %3.1f ms\n", dt);
bitmap.display_and_exit();
cudaFree(dev_bitmap);
cudaFree(s);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
bc5dc187188bca282226544be21cab190d4496af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma, created on 16.04.2018
//
#include <ops/declarable/helpers/reverse.h>
#include <helpers/ShapeUtils.h>
#include <array/ResultSet.h>
#include <TAD.h>
#include <PointersManager.h>
#include <ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename T>
static __global__ void reverseArrayKernel(void* input, Nd4jLong *inputShape, void* output, Nd4jLong *outputShape, Nd4jLong numOfElemsToReverse) {
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
__shared__ int linearStatus;
__shared__ T* inputArr;
__shared__ T* outputArr;
__shared__ char inputOrder, outputOrder;
if (threadIdx.x == 0) {
inputOrder = shape::order(inputShape);
outputOrder = shape::order(outputShape);
linearStatus = (shape::elementWiseStride(inputShape) == shape::elementWiseStride(outputShape)) && (inputOrder == outputOrder) ? shape::elementWiseStride(inputShape) : 0;
inputArr = reinterpret_cast<T*>(input);
outputArr = reinterpret_cast<T*>(output);
}
__syncthreads();
auto odd = numOfElemsToReverse % 2 != 0;
auto limit = numOfElemsToReverse / 2;
for (Nd4jLong e = tid; e < limit; e += step) {
// we're calculating offsets within input array
auto fOffset = shape::getIndexOffset(e, inputShape);
auto lOffset = shape::getIndexOffset(numOfElemsToReverse - e - 1, inputShape);
// now we're storing input values
auto v1 = inputArr[fOffset];
auto v2 = inputArr[lOffset];
// now we're calculating offsets within output array
auto zfOffset = shape::getIndexOffset(e, outputShape);
auto zlOffset = shape::getIndexOffset(numOfElemsToReverse - e - 1, outputShape);
// and saving values to output arrays
outputArr[zfOffset] = v2;
outputArr[zlOffset] = v1;
}
// in case of odd array we'll have to move middle value
if (odd && tid == 0) {
auto xOffset = shape::getIndexOffset(limit, inputShape);
auto zOffset = shape::getIndexOffset(limit, outputShape);
outputArr[zOffset] = inputArr[xOffset];
}
}
template<typename T>
static void reverseArray(nd4j::LaunchContext * context, NDArray* input, NDArray* output, Nd4jLong numOfElemsToReverse) {
auto stream = context->getCudaStream();
Nd4jLong numOfReverse = numOfElemsToReverse;
if (numOfElemsToReverse == 0)
numOfReverse = input->lengthOf();
hipLaunchKernelGGL(( reverseArrayKernel<T>), dim3(256), dim3(512), 8192, *stream, input->specialBuffer(), input->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), numOfReverse);
}
///////////////////////////////////////////////////////////////////
template <typename T>
static void reverseSequence_(nd4j::LaunchContext * context, const NDArray* input, const NDArray* seqLengths, NDArray* output, int seqDim, const int batchDim){
int posOfNonUnityDim = -1;
seqLengths->syncToHost();
auto stream = context->getCudaStream();
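// Fast path: vector-like input or a single sequence length, so reverse the
// first seqLengths[0] elements in one kernel launch (or just copy when
// nothing needs to be reversed).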
if(input->isVector() || shape::isLikeVector(input->getShapeInfo(), posOfNonUnityDim) || seqLengths->lengthOf() == 1) {
int numOfElemsToReverse = seqLengths->e<int>(0);
if((seqDim == 0 && input->sizeAt(0) == 1) || (batchDim == posOfNonUnityDim))
output->assign(input);
else
hipLaunchKernelGGL(( reverseArrayKernel<T>), dim3(256), dim3(512), 8192, *stream, input->getSpecialBuffer(), input->getSpecialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), numOfElemsToReverse);//helpers::reverseArray<T>(context, const_cast<NDArray*>(input), output, numOfElemsToReverse);
}
else {
if(seqDim > batchDim)
--seqDim;
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {batchDim});
auto inSubArrsSet = input->allTensorsAlongDimension(dimensions);
auto outSubArrsSet = output->allTensorsAlongDimension(dimensions);
for(int i = 0; i < inSubArrsSet->size(); ++i) {
int numOfElemsToReverse = seqLengths->e<int>(i);
if(numOfElemsToReverse == 0 || numOfElemsToReverse == 1) {
outSubArrsSet->at(i)->assign(inSubArrsSet->at(i));
}
else {
auto inInnerSet = inSubArrsSet->at(i)->allTensorsAlongDimension({seqDim});
auto outInnerSet = outSubArrsSet->at(i)->allTensorsAlongDimension({seqDim});
for(int j = 0; j < inInnerSet->size(); ++j)
reverseArray<T>(context, inInnerSet->at(j), outInnerSet->at(j), numOfElemsToReverse);
delete inInnerSet;
delete outInnerSet;
}
}
delete inSubArrsSet;
delete outSubArrsSet;
}
}
void reverseSequence(nd4j::LaunchContext * context, const NDArray* input, const NDArray* seqLengths, NDArray* output, int seqDim, const int batchDim) {
NDArray::prepareSpecialUse({output}, {input, seqLengths});
// if op isn't inplace - copy original data into output array
if (output->getSpecialBuffer() != input->getSpecialBuffer())
output->assign(input);
BUILD_SINGLE_SELECTOR(input->dataType(), reverseSequence_, (context, input, seqLengths, output, seqDim, batchDim), LIBND4J_TYPES);
NDArray::registerSpecialUse({output}, {input, seqLengths});
}
//////////////////////////////////////////////////////////////////////////
void reverse(nd4j::LaunchContext * context, const NDArray* input, NDArray* output, const std::vector<int>* intArgs, bool isBackProp) {
// we need to reverse axis only if that's new op
std::vector<int> dimensions = isBackProp ? ShapeUtils::evalDimsToExclude(input->rankOf(), *intArgs) : *intArgs;
std::vector<int> axis = ShapeUtils::evalDimsToExclude(input->rankOf(), dimensions);
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), axis);
auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), axis);
auto listOut = output->allTensorsAlongDimension(dimensions);
auto listIn = input->allTensorsAlongDimension(dimensions);
NDArray *subArrIn, *subArrOut;
NDArray::prepareSpecialUse({output}, {input});
for(int i = 0; i < listIn->size(); ++i) { // listIn->size() = listOut->size()
subArrIn = listIn->at(i);
subArrOut = listOut->at(i);
BUILD_SINGLE_SELECTOR(input->dataType(), reverseArray, (context, subArrIn, subArrOut, 0), LIBND4J_TYPES);
}
//BUILD_SINGLE_SELECTOR(input->dataType(), reverseArray, (context, const_cast<NDArray*>(input), output, (int)0), LIBND4J_TYPES);
NDArray::registerSpecialUse({output}, {input});
delete listOut;
delete listIn;
}
BUILD_SINGLE_TEMPLATE(template void reverseArray, (nd4j::LaunchContext * context, NDArray *inArr, NDArray *outArr, Nd4jLong numOfElemsToReverse), LIBND4J_TYPES);
}
}
}
| bc5dc187188bca282226544be21cab190d4496af.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma, created on 16.04.2018
//
#include <ops/declarable/helpers/reverse.h>
#include <helpers/ShapeUtils.h>
#include <array/ResultSet.h>
#include <TAD.h>
#include <PointersManager.h>
#include <ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename T>
static __global__ void reverseArrayKernel(void* input, Nd4jLong *inputShape, void* output, Nd4jLong *outputShape, Nd4jLong numOfElemsToReverse) {
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
__shared__ int linearStatus;
__shared__ T* inputArr;
__shared__ T* outputArr;
__shared__ char inputOrder, outputOrder;
if (threadIdx.x == 0) {
inputOrder = shape::order(inputShape);
outputOrder = shape::order(outputShape);
linearStatus = (shape::elementWiseStride(inputShape) == shape::elementWiseStride(outputShape)) && (inputOrder == outputOrder) ? shape::elementWiseStride(inputShape) : 0;
inputArr = reinterpret_cast<T*>(input);
outputArr = reinterpret_cast<T*>(output);
}
__syncthreads();
auto odd = numOfElemsToReverse % 2 != 0;
auto limit = numOfElemsToReverse / 2;
for (Nd4jLong e = tid; e < limit; e += step) {
// we're calculating offsets within input array
auto fOffset = shape::getIndexOffset(e, inputShape);
auto lOffset = shape::getIndexOffset(numOfElemsToReverse - e - 1, inputShape);
// now we're storing input values
auto v1 = inputArr[fOffset];
auto v2 = inputArr[lOffset];
// now we're calculating offsets within output array
auto zfOffset = shape::getIndexOffset(e, outputShape);
auto zlOffset = shape::getIndexOffset(numOfElemsToReverse - e - 1, outputShape);
// and saving values to output arrays
outputArr[zfOffset] = v2;
outputArr[zlOffset] = v1;
}
// in case of odd array we'll have to move middle value
if (odd && tid == 0) {
auto xOffset = shape::getIndexOffset(limit, inputShape);
auto zOffset = shape::getIndexOffset(limit, outputShape);
outputArr[zOffset] = inputArr[xOffset];
}
}
template<typename T>
static void reverseArray(nd4j::LaunchContext * context, NDArray* input, NDArray* output, Nd4jLong numOfElemsToReverse) {
auto stream = context->getCudaStream();
Nd4jLong numOfReverse = numOfElemsToReverse;
if (numOfElemsToReverse == 0)
numOfReverse = input->lengthOf();
reverseArrayKernel<T><<<256, 512, 8192, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), numOfReverse);
}
///////////////////////////////////////////////////////////////////
template <typename T>
static void reverseSequence_(nd4j::LaunchContext * context, const NDArray* input, const NDArray* seqLengths, NDArray* output, int seqDim, const int batchDim){
int posOfNonUnityDim = -1;
seqLengths->syncToHost();
auto stream = context->getCudaStream();
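// Fast path: vector-like input or a single sequence length, so reverse the
// first seqLengths[0] elements in one kernel launch (or just copy when
// nothing needs to be reversed).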
if(input->isVector() || shape::isLikeVector(input->getShapeInfo(), posOfNonUnityDim) || seqLengths->lengthOf() == 1) {
int numOfElemsToReverse = seqLengths->e<int>(0);
if((seqDim == 0 && input->sizeAt(0) == 1) || (batchDim == posOfNonUnityDim))
output->assign(input);
else
reverseArrayKernel<T><<<256, 512, 8192, *stream>>>(input->getSpecialBuffer(), input->getSpecialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), numOfElemsToReverse);//helpers::reverseArray<T>(context, const_cast<NDArray*>(input), output, numOfElemsToReverse);
}
else {
if(seqDim > batchDim)
--seqDim;
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {batchDim});
auto inSubArrsSet = input->allTensorsAlongDimension(dimensions);
auto outSubArrsSet = output->allTensorsAlongDimension(dimensions);
for(int i = 0; i < inSubArrsSet->size(); ++i) {
int numOfElemsToReverse = seqLengths->e<int>(i);
if(numOfElemsToReverse == 0 || numOfElemsToReverse == 1) {
outSubArrsSet->at(i)->assign(inSubArrsSet->at(i));
}
else {
auto inInnerSet = inSubArrsSet->at(i)->allTensorsAlongDimension({seqDim});
auto outInnerSet = outSubArrsSet->at(i)->allTensorsAlongDimension({seqDim});
for(int j = 0; j < inInnerSet->size(); ++j)
reverseArray<T>(context, inInnerSet->at(j), outInnerSet->at(j), numOfElemsToReverse);
delete inInnerSet;
delete outInnerSet;
}
}
delete inSubArrsSet;
delete outSubArrsSet;
}
}
void reverseSequence(nd4j::LaunchContext * context, const NDArray* input, const NDArray* seqLengths, NDArray* output, int seqDim, const int batchDim) {
NDArray::prepareSpecialUse({output}, {input, seqLengths});
// if op isn't inplace - copy original data into output array
if (output->getSpecialBuffer() != input->getSpecialBuffer())
output->assign(input);
BUILD_SINGLE_SELECTOR(input->dataType(), reverseSequence_, (context, input, seqLengths, output, seqDim, batchDim), LIBND4J_TYPES);
NDArray::registerSpecialUse({output}, {input, seqLengths});
}
//////////////////////////////////////////////////////////////////////////
void reverse(nd4j::LaunchContext * context, const NDArray* input, NDArray* output, const std::vector<int>* intArgs, bool isBackProp) {
// we need to reverse axis only if that's new op
std::vector<int> dimensions = isBackProp ? ShapeUtils::evalDimsToExclude(input->rankOf(), *intArgs) : *intArgs;
std::vector<int> axis = ShapeUtils::evalDimsToExclude(input->rankOf(), dimensions);
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), axis);
auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), axis);
auto listOut = output->allTensorsAlongDimension(dimensions);
auto listIn = input->allTensorsAlongDimension(dimensions);
NDArray *subArrIn, *subArrOut;
NDArray::prepareSpecialUse({output}, {input});
for(int i = 0; i < listIn->size(); ++i) { // listIn->size() = listOut->size()
subArrIn = listIn->at(i);
subArrOut = listOut->at(i);
BUILD_SINGLE_SELECTOR(input->dataType(), reverseArray, (context, subArrIn, subArrOut, 0), LIBND4J_TYPES);
}
//BUILD_SINGLE_SELECTOR(input->dataType(), reverseArray, (context, const_cast<NDArray*>(input), output, (int)0), LIBND4J_TYPES);
NDArray::registerSpecialUse({output}, {input});
delete listOut;
delete listIn;
}
BUILD_SINGLE_TEMPLATE(template void reverseArray, (nd4j::LaunchContext * context, NDArray *inArr, NDArray *outArr, Nd4jLong numOfElemsToReverse), LIBND4J_TYPES);
}
}
}
|
0f54fdc7936218753cc464b52807de332c4cc642.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/brelu_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void BReLUForward(const int n, const Dtype* in, Dtype* out,
Dtype minval, Dtype maxval) {
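// Bounded leaky ReLU: identity on (0, 1), slope minval for inputs <= 0,
// saturates to maxval for inputs >= 1.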
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] < 1 ? (in[index] > 0 ? in[index] : in[index] * minval) : maxval;
}
}
template <typename Dtype>
void BReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
Dtype minval = this -> layer_param_.brelu_param().minval();
Dtype maxval = this -> layer_param_.brelu_param().maxval();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( BReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data, minval, maxval);
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
template <typename Dtype>
__global__ void BReLUBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff, Dtype minval, Dtype maxval) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * ((in_data[index] > 0 && in_data[index] <= 1)
+ (in_data[index] <= 0 || in_data[index] >= maxval) * minval);
}
}
template <typename Dtype>
void BReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype minval = this -> layer_param_.brelu_param().minval();
Dtype maxval = this -> layer_param_.brelu_param().maxval();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( BReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_data, bottom_diff, minval, maxval);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BReLULayer);
} // namespace caffe
| 0f54fdc7936218753cc464b52807de332c4cc642.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/brelu_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void BReLUForward(const int n, const Dtype* in, Dtype* out,
Dtype minval, Dtype maxval) {
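// Bounded leaky ReLU: identity on (0, 1), slope minval for inputs <= 0,
// saturates to maxval for inputs >= 1.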
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] < 1 ? (in[index] > 0 ? in[index] : in[index] * minval) : maxval;
}
}
template <typename Dtype>
void BReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
Dtype minval = this -> layer_param_.brelu_param().minval();
Dtype maxval = this -> layer_param_.brelu_param().maxval();
// NOLINT_NEXT_LINE(whitespace/operators)
BReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data, minval, maxval);
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
template <typename Dtype>
__global__ void BReLUBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff, Dtype minval, Dtype maxval) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * ((in_data[index] > 0 && in_data[index] <= 1)
+ (in_data[index] <= 0 || in_data[index] >= maxval) * minval);
}
}
template <typename Dtype>
void BReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype minval = this -> layer_param_.brelu_param().minval();
Dtype maxval = this -> layer_param_.brelu_param().maxval();
// NOLINT_NEXT_LINE(whitespace/operators)
BReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, bottom_diff, minval, maxval);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BReLULayer);
} // namespace caffe
|
85992b75f0763f3d3a849d5339f6a8c085bd53e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include "OptixPreprocessor.h"
#include "OptixMath.h"
#include "OptixBufferView.h"
#include "OptixLight.h"
#include "OptixGeometry.h"
#include "OptixMaterialData.h"
#include "OptixRecordData.h"
#include "OptixLaunchParams.h"
#include <string>
#include "LocalGeometry.h"
extern "C" {
__constant__ SystemData sysData;
}
//------------------------------------------------------------------------------
//
// GGX/smith shading helpers
// TODO: move into header so can be shared by path tracer and bespoke renderers
//
//------------------------------------------------------------------------------
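// Schlick approximation of the Fresnel reflectance.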
__device__ float3 schlick( const float3 spec_color, const float V_dot_H )
{
return spec_color + ( make_float3( 1.0f ) - spec_color ) * powf( 1.0f - V_dot_H, 5.0f );
}
__device__ float vis( const float N_dot_L, const float N_dot_V, const float alpha )
{
const float alpha_sq = alpha*alpha;
const float ggx0 = N_dot_L * sqrtf( N_dot_V*N_dot_V * ( 1.0f - alpha_sq ) + alpha_sq );
const float ggx1 = N_dot_V * sqrtf( N_dot_L*N_dot_L * ( 1.0f - alpha_sq ) + alpha_sq );
return 2.0f * N_dot_L * N_dot_V / (ggx0+ggx1);
}
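// GGX (Trowbridge-Reitz) microfacet normal distribution function.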
__device__ float ggxNormal( const float N_dot_H, const float alpha )
{
const float alpha_sq = alpha*alpha;
const float N_dot_H_sq = N_dot_H*N_dot_H;
const float x = N_dot_H_sq*( alpha_sq - 1.0f ) + 1.0f;
return alpha_sq/( M_PIf*x*x );
}
__device__ float3 linearize( float3 c )
{
return make_float3(
powf( c.x, 2.2f ),
powf( c.y, 2.2f ),
powf( c.z, 2.2f )
);
}
static __forceinline__ __device__ bool traceOcclusion(
OptixTraversableHandle handle,
float3 ray_origin,
float3 ray_direction,
float tmin,
float tmax
)
{
uint32_t occluded = 0u;
optixTrace(
handle,
ray_origin,
ray_direction,
tmin,
tmax,
0.0f, // rayTime
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT,
WHITTED_RAY_TYPE_OCCLUSION, // SBT offset
WHITTED_RAY_TYPE_COUNT, // SBT stride
WHITTED_RAY_TYPE_OCCLUSION, // missSBTIndex
occluded );
return occluded;
}
__forceinline__ __device__ uchar4 make_color( const float3& c )
{
return make_uchar4(
static_cast<uint8_t>( clamp( c.x, 0.0f, 1.0f ) *255.0f ),
static_cast<uint8_t>( clamp( c.y, 0.0f, 1.0f ) *255.0f ),
static_cast<uint8_t>( clamp( c.z, 0.0f, 1.0f ) *255.0f ),
255u
);
}
static __forceinline__ __device__ void traceRadiance(
OptixTraversableHandle handle,
float3 ray_origin,
float3 ray_direction,
float tmin,
float tmax,
DreamerPayload* payload
)
{
uint32_t u0=0, u1=0, u2=0, u3=~0,u4= ~0;
optixTrace(
handle,
ray_origin, ray_direction,
tmin,
tmax,
0.0f, // rayTime
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE,
WHITTED_RAY_TYPE_RADIANCE, // SBT offset
WHITTED_RAY_TYPE_COUNT, // SBT stride
WHITTED_RAY_TYPE_RADIANCE, // missSBTIndex
u0, u1, u2, u3, u4);
payload->result.x = __int_as_float( u0 );
payload->result.y = __int_as_float( u1 );
payload->result.z = __int_as_float( u2 );
payload->meshID = u3;
payload->primitiveID = u4;
}
__forceinline__ __device__ void setPayloadResult( float3 p )
{
optixSetPayload_0( float_as_int( p.x ) );
optixSetPayload_1( float_as_int( p.y ) );
optixSetPayload_2( float_as_int( p.z ) );
}
extern "C" __global__ void __raygen__eye_path()
{
const uint2 theLaunchDim = make_uint2(optixGetLaunchDimensions());
const uint2 theLaunchIndex = make_uint2(optixGetLaunchIndex());
const RayGenData* rtData = (RayGenData*)optixGetSbtDataPointer();
const float3 U = rtData->camera_u;
const float3 V = rtData->camera_v;
const float3 W = rtData->camera_w;
const int subframe_index = sysData.subframe_index;
uint32_t seed = tea<4>( theLaunchIndex.y*theLaunchDim.x + theLaunchIndex.x, subframe_index );
const float2 subpixel_jitter = subframe_index == 0 ?
make_float2( 0.0f, 0.0f ) :
make_float2( rnd( seed )-0.5f, rnd( seed )-0.5f );
// Decoupling the launch dimension from the screen resolution will allow for partial rendering algorithms.
const float2 screen = make_float2(sysData.resolution); // Note, not using theLaunchDim here!
// E.g. assume theLaunchDim == sysData.resolution for rendering and theLaunchDim == (1,1) for picking.
const float2 fragment = (!sysData.pickingEnabled) ? make_float2(theLaunchIndex) + subpixel_jitter : sysData.pickingFragment;
const float2 ndc = 2 * (fragment / screen) - 1.0f; // Normalized device coordinates in range [-1, 1].
// Assume sysData.camera contains the usual pinhole camera setup.
float3 origin = rtData->cam_eye;
float3 direction = normalize(U * ndc.x + V * ndc.y + W);
/* if(sysData.pickingEnabled)
{
printf("############################################\n");
printf ("CAM EYE: %f , %f , %f \n", origin.x, origin.y, origin.z);
printf ("RAY DIR: %f , %f , %f \n", direction.x, direction.y, direction.z);
printf ("SCREEN: %f , %f \n", screen.x, screen.y);
printf ("FRAGMENT: %f , %f \n", fragment.x, fragment.y);
printf("############################################\n");
} */
DreamerPayload payload;
payload.result = make_float3(0.0f);
payload.meshID = ~0;
payload.primitiveID = ~0;
traceRadiance(
sysData.topObject,
origin,
direction,
0.01f, // tmin // TODO: smarter offset
1e16f, // tmax
&payload );
if(sysData.pickingEnabled)
{
unsigned int* pickData = reinterpret_cast<unsigned int*>(sysData.pickingBuffer);
pickData[0] = payload.meshID;
pickData[1] = payload.primitiveID;
//printf("Picked mesh: %i\n", pickData[0] );
//printf("Picked triangle: %i\n", pickData[1]);
}
const unsigned int index = theLaunchDim.x * theLaunchIndex.y + theLaunchIndex.x;
float3 accum_color = payload.result;
if( subframe_index > 0 )
{
const float a = 1.0f / static_cast<float>( subframe_index+1 );
const float3 accum_color_prev = make_float3( sysData.accum_buffer[ index ]);
accum_color = lerp( accum_color_prev, accum_color, a );
}
sysData.accum_buffer[ index ] = make_float4( accum_color, 1.0f);
uchar4 * buffer = reinterpret_cast<uchar4*>(sysData.outputBuffer);
buffer[index] = make_color(accum_color);
}
__forceinline__ __device__ void setPayloadOcclusion( bool occluded )
{
optixSetPayload_0( static_cast<uint32_t>( occluded ) );
}
extern "C" __global__ void __miss__miss()
{
MissData* rt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() );
setPayloadResult( make_float3( rt_data->r, rt_data->g, rt_data->b ) );
}
extern "C" __global__ void __closesthit__occlusion()
{
setPayloadOcclusion( true );
}
extern "C" __global__ void __closesthit__radiance()
{
if(sysData.pickingEnabled)
{
unsigned int primID = optixGetPrimitiveIndex();
unsigned int instID = optixGetInstanceId();
// printf("Picked mesh: %i\n", instID );
// printf("Picked triangle: %i\n", primID);
optixSetPayload_3( static_cast<unsigned int>( instID ) );
optixSetPayload_4( static_cast<unsigned int>( primID ) );
return;
}
const HitGroupSBT* hit_group_data = reinterpret_cast<HitGroupSBT*>( optixGetSbtDataPointer() );
if( hit_group_data == nullptr) return;
const LocalGeometry geom = getLocalGeometry( hit_group_data->geometry_data );
//
// Retrieve material data
//
float3 base_color = make_float3( hit_group_data->material_data.pbr.base_color );
if( hit_group_data->material_data.pbr.base_color_tex )
base_color *= linearize( make_float3(
tex2D<float4>( hit_group_data->material_data.pbr.base_color_tex, geom.UV.x, geom.UV.y )
) );
float metallic = hit_group_data->material_data.pbr.metallic;
float roughness = hit_group_data->material_data.pbr.roughness;
float4 mr_tex = make_float4( 1.0f );
if( hit_group_data->material_data.pbr.metallic_roughness_tex )
// MR tex is (occlusion, roughness, metallic )
mr_tex = tex2D<float4>( hit_group_data->material_data.pbr.metallic_roughness_tex, geom.UV.x, geom.UV.y );
roughness *= mr_tex.y;
metallic *= mr_tex.z;
//
// Convert to material params
//
const float F0 = 0.04f;
const float3 diff_color = base_color*( 1.0f - F0 )*( 1.0f - metallic );
const float3 spec_color = lerp( make_float3( F0 ), base_color, metallic );
const float alpha = roughness*roughness;
//
// compute direct lighting
//
float3 N = geom.N;
if( hit_group_data->material_data.pbr.normal_tex )
{
const float4 NN = 2.0f*tex2D<float4>( hit_group_data->material_data.pbr.normal_tex, geom.UV.x, geom.UV.y ) - make_float4(1.0f);
N = normalize( NN.x*normalize( geom.dpdu ) + NN.y*normalize( geom.dpdv ) + NN.z*geom.N );
}
float3 result = make_float3( 0.0f );
for( int i = 0; i < sysData.lights.count; ++i )
{
//result = make_float3( 1.0f );
OptixLight::Point light = sysData.lights[i];
// TODO: optimize
const float L_dist = length( light.position - geom.P );
const float3 L = ( light.position - geom.P ) / L_dist;
const float3 V = -normalize( optixGetWorldRayDirection() );
const float3 H = normalize( L + V );
const float N_dot_L = dot( N, L );
const float N_dot_V = dot( N, V );
const float N_dot_H = dot( N, H );
const float V_dot_H = dot( V, H );
if( N_dot_L > 0.0f && N_dot_V > 0.0f )
{
//result = make_float3( 1.0f, 1.0f, 0.0f );
const float tmin = 0.001f; // TODO
const float tmax = L_dist - 0.001f; // TODO
const bool occluded = traceOcclusion( sysData.topObject, geom.P, L, tmin, tmax );
if( !occluded )
{
const float3 F = schlick( spec_color, V_dot_H );
const float G_vis = vis( N_dot_L, N_dot_V, alpha );
const float D = ggxNormal( N_dot_H, alpha );
const float3 diff = ( 1.0f - F )*diff_color / M_PIf;
const float3 spec = F*G_vis*D;
result += light.color*light.intensity*N_dot_L*( diff + spec );
}
//result = make_float3( 1.0f );
}
}
// TODO: add debug viewing mode that allows runtime switchable views of shading params, normals, etc
//result = make_float3( roughness );
//result = N*0.5f + make_float3( 0.5f );
//result = geom.N*0.5f + make_float3( 0.5f );
setPayloadResult( result );
// no rendering material yet, just make it blue
//setPayloadResult( make_float3( 0.0f, 0.0f, 1.0f) );
}
extern "C" __global__ void __exception__all()
{
const uint3 theLaunchIndex = optixGetLaunchIndex();
const int theExceptionCode = optixGetExceptionCode();
printf("Exception %d at (%u, %u)\n", theExceptionCode, theLaunchIndex.x, theLaunchIndex.y);
// DAR FIXME This only works for render strategies where the launch dimension matches the outputBuffer resolution.
//float4* buffer = reinterpret_cast<float4*>(sysData.outputBuffer);
//const unsigned int index = theLaunchDim.x * theLaunchIndex.y + theLaunchIndex.x;
//buffer[index] = make_float4(1000000.0f, 0.0f, 1000000.0f, 1.0f); // super magenta
} | 85992b75f0763f3d3a849d5339f6a8c085bd53e7.cu | //
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include "OptixPreprocessor.h"
#include "OptixMath.h"
#include "OptixBufferView.h"
#include "OptixLight.h"
#include "OptixGeometry.h"
#include "OptixMaterialData.h"
#include "OptixRecordData.h"
#include "OptixLaunchParams.h"
#include <string>
#include "LocalGeometry.h"
extern "C" {
__constant__ SystemData sysData;
}
//------------------------------------------------------------------------------
//
// GGX/smith shading helpers
// TODO: move into header so can be shared by path tracer and bespoke renderers
//
//------------------------------------------------------------------------------
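// Schlick approximation of the Fresnel reflectance.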
__device__ float3 schlick( const float3 spec_color, const float V_dot_H )
{
return spec_color + ( make_float3( 1.0f ) - spec_color ) * powf( 1.0f - V_dot_H, 5.0f );
}
__device__ float vis( const float N_dot_L, const float N_dot_V, const float alpha )
{
const float alpha_sq = alpha*alpha;
const float ggx0 = N_dot_L * sqrtf( N_dot_V*N_dot_V * ( 1.0f - alpha_sq ) + alpha_sq );
const float ggx1 = N_dot_V * sqrtf( N_dot_L*N_dot_L * ( 1.0f - alpha_sq ) + alpha_sq );
return 2.0f * N_dot_L * N_dot_V / (ggx0+ggx1);
}
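// GGX (Trowbridge-Reitz) microfacet normal distribution function.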
__device__ float ggxNormal( const float N_dot_H, const float alpha )
{
const float alpha_sq = alpha*alpha;
const float N_dot_H_sq = N_dot_H*N_dot_H;
const float x = N_dot_H_sq*( alpha_sq - 1.0f ) + 1.0f;
return alpha_sq/( M_PIf*x*x );
}
__device__ float3 linearize( float3 c )
{
return make_float3(
powf( c.x, 2.2f ),
powf( c.y, 2.2f ),
powf( c.z, 2.2f )
);
}
static __forceinline__ __device__ bool traceOcclusion(
OptixTraversableHandle handle,
float3 ray_origin,
float3 ray_direction,
float tmin,
float tmax
)
{
uint32_t occluded = 0u;
optixTrace(
handle,
ray_origin,
ray_direction,
tmin,
tmax,
0.0f, // rayTime
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT,
WHITTED_RAY_TYPE_OCCLUSION, // SBT offset
WHITTED_RAY_TYPE_COUNT, // SBT stride
WHITTED_RAY_TYPE_OCCLUSION, // missSBTIndex
occluded );
return occluded;
}
__forceinline__ __device__ uchar4 make_color( const float3& c )
{
return make_uchar4(
static_cast<uint8_t>( clamp( c.x, 0.0f, 1.0f ) *255.0f ),
static_cast<uint8_t>( clamp( c.y, 0.0f, 1.0f ) *255.0f ),
static_cast<uint8_t>( clamp( c.z, 0.0f, 1.0f ) *255.0f ),
255u
);
}
static __forceinline__ __device__ void traceRadiance(
OptixTraversableHandle handle,
float3 ray_origin,
float3 ray_direction,
float tmin,
float tmax,
DreamerPayload* payload
)
{
uint32_t u0=0, u1=0, u2=0, u3=~0,u4= ~0;
optixTrace(
handle,
ray_origin, ray_direction,
tmin,
tmax,
0.0f, // rayTime
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE,
WHITTED_RAY_TYPE_RADIANCE, // SBT offset
WHITTED_RAY_TYPE_COUNT, // SBT stride
WHITTED_RAY_TYPE_RADIANCE, // missSBTIndex
u0, u1, u2, u3, u4);
payload->result.x = __int_as_float( u0 );
payload->result.y = __int_as_float( u1 );
payload->result.z = __int_as_float( u2 );
payload->meshID = u3;
payload->primitiveID = u4;
}
__forceinline__ __device__ void setPayloadResult( float3 p )
{
optixSetPayload_0( float_as_int( p.x ) );
optixSetPayload_1( float_as_int( p.y ) );
optixSetPayload_2( float_as_int( p.z ) );
}
extern "C" __global__ void __raygen__eye_path()
{
const uint2 theLaunchDim = make_uint2(optixGetLaunchDimensions());
const uint2 theLaunchIndex = make_uint2(optixGetLaunchIndex());
const RayGenData* rtData = (RayGenData*)optixGetSbtDataPointer();
const float3 U = rtData->camera_u;
const float3 V = rtData->camera_v;
const float3 W = rtData->camera_w;
const int subframe_index = sysData.subframe_index;
uint32_t seed = tea<4>( theLaunchIndex.y*theLaunchDim.x + theLaunchIndex.x, subframe_index );
const float2 subpixel_jitter = subframe_index == 0 ?
make_float2( 0.0f, 0.0f ) :
make_float2( rnd( seed )-0.5f, rnd( seed )-0.5f );
// Decoupling the launch dimension from the screen resolution will allow for partial rendering algorithms.
const float2 screen = make_float2(sysData.resolution); // Note, not using theLaunchDim here!
// E.g. assume theLaunchDim == sysData.resolution for rendering and theLaunchDim == (1,1) for picking.
const float2 fragment = (!sysData.pickingEnabled) ? make_float2(theLaunchIndex) + subpixel_jitter : sysData.pickingFragment;
const float2 ndc = 2 * (fragment / screen) - 1.0f; // Normalized device coordinates in range [-1, 1].
// Assume sysData.camera contains the usual pinhole camera setup.
float3 origin = rtData->cam_eye;
float3 direction = normalize(U * ndc.x + V * ndc.y + W);
/* if(sysData.pickingEnabled)
{
printf("############################################\n");
printf ("CAM EYE: %f , %f , %f \n", origin.x, origin.y, origin.z);
printf ("RAY DIR: %f , %f , %f \n", direction.x, direction.y, direction.z);
printf ("SCREEN: %f , %f \n", screen.x, screen.y);
printf ("FRAGMENT: %f , %f \n", fragment.x, fragment.y);
printf("############################################\n");
} */
DreamerPayload payload;
payload.result = make_float3(0.0f);
payload.meshID = ~0;
payload.primitiveID = ~0;
traceRadiance(
sysData.topObject,
origin,
direction,
0.01f, // tmin // TODO: smarter offset
1e16f, // tmax
&payload );
if(sysData.pickingEnabled)
{
unsigned int* pickData = reinterpret_cast<unsigned int*>(sysData.pickingBuffer);
pickData[0] = payload.meshID;
pickData[1] = payload.primitiveID;
//printf("Picked mesh: %i\n", pickData[0] );
//printf("Picked triangle: %i\n", pickData[1]);
}
const unsigned int index = theLaunchDim.x * theLaunchIndex.y + theLaunchIndex.x;
float3 accum_color = payload.result;
if( subframe_index > 0 )
{
const float a = 1.0f / static_cast<float>( subframe_index+1 );
const float3 accum_color_prev = make_float3( sysData.accum_buffer[ index ]);
accum_color = lerp( accum_color_prev, accum_color, a );
}
sysData.accum_buffer[ index ] = make_float4( accum_color, 1.0f);
uchar4 * buffer = reinterpret_cast<uchar4*>(sysData.outputBuffer);
buffer[index] = make_color(accum_color);
}
__forceinline__ __device__ void setPayloadOcclusion( bool occluded )
{
optixSetPayload_0( static_cast<uint32_t>( occluded ) );
}
extern "C" __global__ void __miss__miss()
{
MissData* rt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() );
setPayloadResult( make_float3( rt_data->r, rt_data->g, rt_data->b ) );
}
extern "C" __global__ void __closesthit__occlusion()
{
setPayloadOcclusion( true );
}
extern "C" __global__ void __closesthit__radiance()
{
if(sysData.pickingEnabled)
{
unsigned int primID = optixGetPrimitiveIndex();
unsigned int instID = optixGetInstanceId();
// printf("Picked mesh: %i\n", instID );
// printf("Picked triangle: %i\n", primID);
optixSetPayload_3( static_cast<unsigned int>( instID ) );
optixSetPayload_4( static_cast<unsigned int>( primID ) );
return;
}
const HitGroupSBT* hit_group_data = reinterpret_cast<HitGroupSBT*>( optixGetSbtDataPointer() );
if( hit_group_data == nullptr) return;
const LocalGeometry geom = getLocalGeometry( hit_group_data->geometry_data );
//
// Retrieve material data
//
float3 base_color = make_float3( hit_group_data->material_data.pbr.base_color );
if( hit_group_data->material_data.pbr.base_color_tex )
base_color *= linearize( make_float3(
tex2D<float4>( hit_group_data->material_data.pbr.base_color_tex, geom.UV.x, geom.UV.y )
) );
float metallic = hit_group_data->material_data.pbr.metallic;
float roughness = hit_group_data->material_data.pbr.roughness;
float4 mr_tex = make_float4( 1.0f );
if( hit_group_data->material_data.pbr.metallic_roughness_tex )
// MR tex is (occlusion, roughness, metallic )
mr_tex = tex2D<float4>( hit_group_data->material_data.pbr.metallic_roughness_tex, geom.UV.x, geom.UV.y );
roughness *= mr_tex.y;
metallic *= mr_tex.z;
//
// Convert to material params
//
const float F0 = 0.04f;
const float3 diff_color = base_color*( 1.0f - F0 )*( 1.0f - metallic );
const float3 spec_color = lerp( make_float3( F0 ), base_color, metallic );
const float alpha = roughness*roughness;
//
// compute direct lighting
//
float3 N = geom.N;
if( hit_group_data->material_data.pbr.normal_tex )
{
const float4 NN = 2.0f*tex2D<float4>( hit_group_data->material_data.pbr.normal_tex, geom.UV.x, geom.UV.y ) - make_float4(1.0f);
N = normalize( NN.x*normalize( geom.dpdu ) + NN.y*normalize( geom.dpdv ) + NN.z*geom.N );
}
float3 result = make_float3( 0.0f );
for( int i = 0; i < sysData.lights.count; ++i )
{
//result = make_float3( 1.0f );
OptixLight::Point light = sysData.lights[i];
// TODO: optimize
const float L_dist = length( light.position - geom.P );
const float3 L = ( light.position - geom.P ) / L_dist;
const float3 V = -normalize( optixGetWorldRayDirection() );
const float3 H = normalize( L + V );
const float N_dot_L = dot( N, L );
const float N_dot_V = dot( N, V );
const float N_dot_H = dot( N, H );
const float V_dot_H = dot( V, H );
if( N_dot_L > 0.0f && N_dot_V > 0.0f )
{
//result = make_float3( 1.0f, 1.0f, 0.0f );
const float tmin = 0.001f; // TODO
const float tmax = L_dist - 0.001f; // TODO
const bool occluded = traceOcclusion( sysData.topObject, geom.P, L, tmin, tmax );
if( !occluded )
{
const float3 F = schlick( spec_color, V_dot_H );
const float G_vis = vis( N_dot_L, N_dot_V, alpha );
const float D = ggxNormal( N_dot_H, alpha );
const float3 diff = ( 1.0f - F )*diff_color / M_PIf;
const float3 spec = F*G_vis*D;
result += light.color*light.intensity*N_dot_L*( diff + spec );
}
//result = make_float3( 1.0f );
}
}
// TODO: add debug viewing mode that allows runtime switchable views of shading params, normals, etc
//result = make_float3( roughness );
//result = N*0.5f + make_float3( 0.5f );
//result = geom.N*0.5f + make_float3( 0.5f );
setPayloadResult( result );
// no rendering material yet, just make it blue
//setPayloadResult( make_float3( 0.0f, 0.0f, 1.0f) );
}
extern "C" __global__ void __exception__all()
{
const uint3 theLaunchIndex = optixGetLaunchIndex();
const int theExceptionCode = optixGetExceptionCode();
printf("Exception %d at (%u, %u)\n", theExceptionCode, theLaunchIndex.x, theLaunchIndex.y);
// DAR FIXME This only works for render strategies where the launch dimension matches the outputBuffer resolution.
//float4* buffer = reinterpret_cast<float4*>(sysData.outputBuffer);
//const unsigned int index = theLaunchDim.x * theLaunchIndex.y + theLaunchIndex.x;
//buffer[index] = make_float4(1000000.0f, 0.0f, 1000000.0f, 1.0f); // super magenta
} |
90c58ab01bf07796b5329024b3691d2dd2e26a7b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file quantized_pooling.cu
*/
#include <mxnet/operator_util.h>
#include <vector>
#include "../nn/pooling-inl.h"
#include "../mshadow_op.h"
namespace mxnet {
namespace op {
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 6 && TORCH_HIP_VERSION >= 8000
template<typename DType>
class QuantizedCuDNNPoolingOp {
public:
QuantizedCuDNNPoolingOp() {
CUDNN_CALL(cudnnCreatePoolingDescriptor(&pool_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc_));
}
void Init(const PoolingParam& param, const mxnet::TShape& dshape, const mxnet::TShape& oshape) {
const int N = 0, H = 2, W = 3, C = 1;
const cudnnDataType_t dtype = mshadow::DataType<DType>::kCudnnFlag;
CHECK(param.kernel.ndim() == 2) << "Only support 2D pooling";
if (param.pool_type == pool_enum::kMaxPooling) {
mode_ = CUDNN_POOLING_MAX;
} else if (param.pool_type == pool_enum::kAvgPooling) {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
} else {
LOG(FATAL) << "QuantizedCuDNNPoolingOp only supports pool_type=max/avg";
}
CUDNN_CALL(cudnnSetTensor4dDescriptor(in_desc_,
CUDNN_TENSOR_NCHW,
dtype,
dshape[N],
dshape[C],
dshape[H],
dshape[W]));
CUDNN_CALL(cudnnSetTensor4dDescriptor(out_desc_,
CUDNN_TENSOR_NCHW,
dtype,
oshape[N],
oshape[C],
oshape[H],
oshape[W]));
CUDNN_CALL(cudnnSetPooling2dDescriptor(pool_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
param.global_pool ? dshape[2] : param.kernel[0],
param.global_pool ? dshape[3] : param.kernel[1],
param.pad[0],
param.pad[1],
param.global_pool ? 1 : param.stride[0],
param.global_pool ? 1 :param.stride[1]));
}
~QuantizedCuDNNPoolingOp() {
CUDNN_CALL(cudnnDestroyTensorDescriptor(in_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(out_desc_));
CUDNN_CALL(cudnnDestroyPoolingDescriptor(pool_desc_));
}
void Forward(mshadow::Stream<gpu>* s,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
CHECK_EQ(inputs.size(), 3U);
CHECK_EQ(outputs.size(), 3U);
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(s->dnn_handle_ownership_, mshadow::Stream<gpu>::OwnHandle);
float alpha = 1.0f;
float beta = 0.0f;
CUDNN_CALL(cudnnPoolingForward(s->dnn_handle_,
pool_desc_,
&alpha,
in_desc_,
inputs[0].dptr_,
&beta,
out_desc_,
outputs[0].dptr_));
Tensor<gpu, 1, float> omin_range = outputs[1].FlatTo1D<gpu, float>(s);
Tensor<gpu, 1, float> omax_range = outputs[2].FlatTo1D<gpu, float>(s);
ASSIGN_DISPATCH(omin_range, req[1],
F<mshadow_op::identity>(inputs[1].FlatTo1D<gpu, float>(s)));
ASSIGN_DISPATCH(omax_range, req[2],
F<mshadow_op::identity>(inputs[2].FlatTo1D<gpu, float>(s)));
}
private:
cudnnPoolingMode_t mode_;
cudnnTensorDescriptor_t in_desc_;
cudnnTensorDescriptor_t out_desc_;
cudnnPoolingDescriptor_t pool_desc_;
}; // class QuantizedCuDNNPoolingOp
#endif // MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 6 && TORCH_HIP_VERSION >= 8000
void QuantizedPoolingForwardGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
CHECK_EQ(param.kernel.ndim(), 2U)
<< "QuantizedPoolingForward<gpu> only supports 2D convolution for now";
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 6 && TORCH_HIP_VERSION >= 8000
#if DMLC_CXX11_THREAD_LOCAL
static thread_local QuantizedCuDNNPoolingOp<int8_t> op;
#else
static MX_THREAD_LOCAL QuantizedCuDNNPoolingOp<int8_t> op;
#endif // DMLC_CXX11_THREAD_LOCAL
op.Init(param, {inputs[0].shape_}, {outputs[0].shape_});
op.Forward(ctx.get_stream<gpu>(), inputs, req, outputs);
#else
LOG(FATAL) << "QuantizedPoolingForward<gpu> only supports cudnnPoolingForward "
"with CUDNN >= 6.0 and CUDA >= 8.0";
#endif // MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 6 && TORCH_HIP_VERSION >= 8000
}
NNVM_REGISTER_OP(_contrib_quantized_pooling)
.set_attr<FCompute>("FCompute<gpu>", QuantizedPoolingForwardGPU);
} // namespace op
} // namespace mxnet
| 90c58ab01bf07796b5329024b3691d2dd2e26a7b.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file quantized_pooling.cu
*/
#include <mxnet/operator_util.h>
#include <vector>
#include "../nn/pooling-inl.h"
#include "../mshadow_op.h"
namespace mxnet {
namespace op {
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 6 && CUDA_VERSION >= 8000
template<typename DType>
class QuantizedCuDNNPoolingOp {
public:
QuantizedCuDNNPoolingOp() {
CUDNN_CALL(cudnnCreatePoolingDescriptor(&pool_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc_));
}
void Init(const PoolingParam& param, const mxnet::TShape& dshape, const mxnet::TShape& oshape) {
const int N = 0, H = 2, W = 3, C = 1;
const cudnnDataType_t dtype = mshadow::DataType<DType>::kCudnnFlag;
CHECK(param.kernel.ndim() == 2) << "Only support 2D pooling";
if (param.pool_type == pool_enum::kMaxPooling) {
mode_ = CUDNN_POOLING_MAX;
} else if (param.pool_type == pool_enum::kAvgPooling) {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
} else {
LOG(FATAL) << "QuantizedCuDNNPoolingOp only supports pool_type=max/avg";
}
CUDNN_CALL(cudnnSetTensor4dDescriptor(in_desc_,
CUDNN_TENSOR_NCHW,
dtype,
dshape[N],
dshape[C],
dshape[H],
dshape[W]));
CUDNN_CALL(cudnnSetTensor4dDescriptor(out_desc_,
CUDNN_TENSOR_NCHW,
dtype,
oshape[N],
oshape[C],
oshape[H],
oshape[W]));
CUDNN_CALL(cudnnSetPooling2dDescriptor(pool_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
param.global_pool ? dshape[2] : param.kernel[0],
param.global_pool ? dshape[3] : param.kernel[1],
param.pad[0],
param.pad[1],
param.global_pool ? 1 : param.stride[0],
param.global_pool ? 1 :param.stride[1]));
}
~QuantizedCuDNNPoolingOp() {
CUDNN_CALL(cudnnDestroyTensorDescriptor(in_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(out_desc_));
CUDNN_CALL(cudnnDestroyPoolingDescriptor(pool_desc_));
}
void Forward(mshadow::Stream<gpu>* s,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
CHECK_EQ(inputs.size(), 3U);
CHECK_EQ(outputs.size(), 3U);
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(s->dnn_handle_ownership_, mshadow::Stream<gpu>::OwnHandle);
float alpha = 1.0f;
float beta = 0.0f;
CUDNN_CALL(cudnnPoolingForward(s->dnn_handle_,
pool_desc_,
&alpha,
in_desc_,
inputs[0].dptr_,
&beta,
out_desc_,
outputs[0].dptr_));
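// Note: the output quantization range is not recomputed for pooling; the
// input min/max scalars (inputs[1], inputs[2]) are simply copied through to
// outputs[1] and outputs[2] below.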
Tensor<gpu, 1, float> omin_range = outputs[1].FlatTo1D<gpu, float>(s);
Tensor<gpu, 1, float> omax_range = outputs[2].FlatTo1D<gpu, float>(s);
ASSIGN_DISPATCH(omin_range, req[1],
F<mshadow_op::identity>(inputs[1].FlatTo1D<gpu, float>(s)));
ASSIGN_DISPATCH(omax_range, req[2],
F<mshadow_op::identity>(inputs[2].FlatTo1D<gpu, float>(s)));
}
private:
cudnnPoolingMode_t mode_;
cudnnTensorDescriptor_t in_desc_;
cudnnTensorDescriptor_t out_desc_;
cudnnPoolingDescriptor_t pool_desc_;
}; // class QuantizedCuDNNPoolingOp
#endif // MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 6 && CUDA_VERSION >= 8000
void QuantizedPoolingForwardGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
CHECK_EQ(param.kernel.ndim(), 2U)
<< "QuantizedPoolingForward<gpu> only supports 2D convolution for now";
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 6 && CUDA_VERSION >= 8000
#if DMLC_CXX11_THREAD_LOCAL
static thread_local QuantizedCuDNNPoolingOp<int8_t> op;
#else
static MX_THREAD_LOCAL QuantizedCuDNNPoolingOp<int8_t> op;
#endif // DMLC_CXX11_THREAD_LOCAL
op.Init(param, {inputs[0].shape_}, {outputs[0].shape_});
op.Forward(ctx.get_stream<gpu>(), inputs, req, outputs);
#else
LOG(FATAL) << "QuantizedPoolingForward<gpu> only supports cudnnPoolingForward "
"with CUDNN >= 6.0 and CUDA >= 8.0";
#endif // MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 6 && CUDA_VERSION >= 8000
}
NNVM_REGISTER_OP(_contrib_quantized_pooling)
.set_attr<FCompute>("FCompute<gpu>", QuantizedPoolingForwardGPU);
} // namespace op
} // namespace mxnet
|
3129e62f61f3551f6c68d9594f46a805ea4bec57.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.h"
#include <gtest/gtest.h>
#include <linalg/batched/gemv.cuh>
#include <raft/cudart_utils.h>
#include <raft/random/rng.hpp>
#include <test_utils.h>
namespace MLCommon {
namespace LinAlg {
namespace Batched {
template <typename T>
struct BatchGemvInputs {
T tolerance;
int m, n, batchSize;
unsigned long long int seed;
};
template <typename T, typename IdxType = int>
::std::ostream& operator<<(::std::ostream& os, const BatchGemvInputs<T>& dims)
{
return os;
}
template <typename Type>
__global__ void naiveBatchGemvKernel(Type* y, const Type* A, const Type* x, int m, int n)
{
int batch = blockIdx.y;
int row = blockIdx.x;
int col = threadIdx.x;
if (row < m && col < n) {
auto prod = A[batch * m * n + row * n + col] * x[batch * n + col];
raft::myAtomicAdd(y + batch * m + row, prod);
}
}
template <typename Type>
void naiveBatchGemv(
Type* y, const Type* A, const Type* x, int m, int n, int batchSize, hipStream_t stream)
{
static int TPB = raft::ceildiv(n, raft::WarpSize) * raft::WarpSize;
dim3 nblks(m, batchSize);
hipLaunchKernelGGL(( naiveBatchGemvKernel<Type>), dim3(nblks), dim3(TPB), 0, stream, y, A, x, m, n);
RAFT_CUDA_TRY(hipPeekAtLastError());
}
template <typename T>
class BatchGemvTest : public ::testing::TestWithParam<BatchGemvInputs<T>> {
protected:
BatchGemvTest() : out_ref(0, stream), out(0, stream) {}
void SetUp() override
{
params = ::testing::TestWithParam<BatchGemvInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int len = params.batchSize * params.m * params.n;
int vecleny = params.batchSize * params.m;
int veclenx = params.batchSize * params.n;
RAFT_CUDA_TRY(hipStreamCreate(&stream));
rmm::device_uvector<T> A(len, stream);
rmm::device_uvector<T> x(veclenx, stream);
out_ref.resize(vecleny, stream);
out.resize(vecleny, stream);
r.uniform(A.data(), len, T(-1.0), T(1.0), stream);
r.uniform(x.data(), veclenx, T(-1.0), T(1.0), stream);
RAFT_CUDA_TRY(hipMemsetAsync(out_ref.data(), 0, sizeof(T) * vecleny, stream));
naiveBatchGemv(
out_ref.data(), A.data(), x.data(), params.m, params.n, params.batchSize, stream);
gemv<T, int>(out.data(),
A.data(),
x.data(),
nullptr,
T(1.0),
T(0.0),
params.m,
params.n,
params.batchSize,
stream);
}
void TearDown() override { RAFT_CUDA_TRY(hipStreamDestroy(stream)); }
protected:
hipStream_t stream = 0;
BatchGemvInputs<T> params;
rmm::device_uvector<T> out_ref;
rmm::device_uvector<T> out;
};
const std::vector<BatchGemvInputs<float>> inputsf = {
{0.005f, 128, 128, 32, 1234ULL},
{0.005f, 128, 126, 32, 1234ULL},
{0.005f, 128, 125, 32, 1234ULL},
{0.005f, 126, 128, 32, 1234ULL},
{0.005f, 126, 126, 32, 1234ULL},
{0.005f, 126, 125, 32, 1234ULL},
{0.005f, 125, 128, 32, 1234ULL},
{0.005f, 125, 126, 32, 1234ULL},
{0.005f, 125, 125, 32, 1234ULL},
};
typedef BatchGemvTest<float> BatchGemvTestF;
TEST_P(BatchGemvTestF, Result)
{
int vecleny = params.batchSize * params.m;
ASSERT_TRUE(
devArrMatch(out_ref.data(), out.data(), vecleny, raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(BatchGemvTests, BatchGemvTestF, ::testing::ValuesIn(inputsf));
typedef BatchGemvTest<double> BatchGemvTestD;
const std::vector<BatchGemvInputs<double>> inputsd = {
{0.0000001, 128, 128, 32, 1234ULL},
{0.0000001, 128, 126, 32, 1234ULL},
{0.0000001, 128, 125, 32, 1234ULL},
{0.0000001, 126, 128, 32, 1234ULL},
{0.0000001, 126, 126, 32, 1234ULL},
{0.0000001, 126, 125, 32, 1234ULL},
{0.0000001, 125, 128, 32, 1234ULL},
{0.0000001, 125, 126, 32, 1234ULL},
{0.0000001, 125, 125, 32, 1234ULL},
};
TEST_P(BatchGemvTestD, Result)
{
int vecleny = params.batchSize * params.m;
ASSERT_TRUE(devArrMatch(
out_ref.data(), out.data(), vecleny, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(BatchGemvTests, BatchGemvTestD, ::testing::ValuesIn(inputsd));
} // end namespace Batched
} // end namespace LinAlg
} // end namespace MLCommon
| 3129e62f61f3551f6c68d9594f46a805ea4bec57.cu | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.h"
#include <gtest/gtest.h>
#include <linalg/batched/gemv.cuh>
#include <raft/cudart_utils.h>
#include <raft/random/rng.hpp>
#include <test_utils.h>
namespace MLCommon {
namespace LinAlg {
namespace Batched {
template <typename T>
struct BatchGemvInputs {
T tolerance;
int m, n, batchSize;
unsigned long long int seed;
};
template <typename T, typename IdxType = int>
::std::ostream& operator<<(::std::ostream& os, const BatchGemvInputs<T>& dims)
{
return os;
}
template <typename Type>
__global__ void naiveBatchGemvKernel(Type* y, const Type* A, const Type* x, int m, int n)
{
int batch = blockIdx.y;
int row = blockIdx.x;
int col = threadIdx.x;
if (row < m && col < n) {
auto prod = A[batch * m * n + row * n + col] * x[batch * n + col];
raft::myAtomicAdd(y + batch * m + row, prod);
}
}
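// Reference host wrapper for the kernel above: one thread block per
// (row, batch) pair, rounded up to a whole number of warps so that every
// column 0..n-1 is handled by its own thread.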
template <typename Type>
void naiveBatchGemv(
Type* y, const Type* A, const Type* x, int m, int n, int batchSize, cudaStream_t stream)
{
static int TPB = raft::ceildiv(n, raft::WarpSize) * raft::WarpSize;
dim3 nblks(m, batchSize);
naiveBatchGemvKernel<Type><<<nblks, TPB, 0, stream>>>(y, A, x, m, n);
RAFT_CUDA_TRY(cudaPeekAtLastError());
}
template <typename T>
class BatchGemvTest : public ::testing::TestWithParam<BatchGemvInputs<T>> {
protected:
BatchGemvTest() : out_ref(0, stream), out(0, stream) {}
void SetUp() override
{
params = ::testing::TestWithParam<BatchGemvInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int len = params.batchSize * params.m * params.n;
int vecleny = params.batchSize * params.m;
int veclenx = params.batchSize * params.n;
RAFT_CUDA_TRY(cudaStreamCreate(&stream));
rmm::device_uvector<T> A(len, stream);
rmm::device_uvector<T> x(veclenx, stream);
out_ref.resize(vecleny, stream);
out.resize(vecleny, stream);
r.uniform(A.data(), len, T(-1.0), T(1.0), stream);
r.uniform(x.data(), veclenx, T(-1.0), T(1.0), stream);
RAFT_CUDA_TRY(cudaMemsetAsync(out_ref.data(), 0, sizeof(T) * vecleny, stream));
naiveBatchGemv(
out_ref.data(), A.data(), x.data(), params.m, params.n, params.batchSize, stream);
gemv<T, int>(out.data(),
A.data(),
x.data(),
nullptr,
T(1.0),
T(0.0),
params.m,
params.n,
params.batchSize,
stream);
}
void TearDown() override { RAFT_CUDA_TRY(cudaStreamDestroy(stream)); }
protected:
cudaStream_t stream = 0;
BatchGemvInputs<T> params;
rmm::device_uvector<T> out_ref;
rmm::device_uvector<T> out;
};
const std::vector<BatchGemvInputs<float>> inputsf = {
{0.005f, 128, 128, 32, 1234ULL},
{0.005f, 128, 126, 32, 1234ULL},
{0.005f, 128, 125, 32, 1234ULL},
{0.005f, 126, 128, 32, 1234ULL},
{0.005f, 126, 126, 32, 1234ULL},
{0.005f, 126, 125, 32, 1234ULL},
{0.005f, 125, 128, 32, 1234ULL},
{0.005f, 125, 126, 32, 1234ULL},
{0.005f, 125, 125, 32, 1234ULL},
};
typedef BatchGemvTest<float> BatchGemvTestF;
TEST_P(BatchGemvTestF, Result)
{
int vecleny = params.batchSize * params.m;
ASSERT_TRUE(
devArrMatch(out_ref.data(), out.data(), vecleny, raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(BatchGemvTests, BatchGemvTestF, ::testing::ValuesIn(inputsf));
typedef BatchGemvTest<double> BatchGemvTestD;
const std::vector<BatchGemvInputs<double>> inputsd = {
{0.0000001, 128, 128, 32, 1234ULL},
{0.0000001, 128, 126, 32, 1234ULL},
{0.0000001, 128, 125, 32, 1234ULL},
{0.0000001, 126, 128, 32, 1234ULL},
{0.0000001, 126, 126, 32, 1234ULL},
{0.0000001, 126, 125, 32, 1234ULL},
{0.0000001, 125, 128, 32, 1234ULL},
{0.0000001, 125, 126, 32, 1234ULL},
{0.0000001, 125, 125, 32, 1234ULL},
};
TEST_P(BatchGemvTestD, Result)
{
int vecleny = params.batchSize * params.m;
ASSERT_TRUE(devArrMatch(
out_ref.data(), out.data(), vecleny, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(BatchGemvTests, BatchGemvTestD, ::testing::ValuesIn(inputsd));
} // end namespace Batched
} // end namespace LinAlg
} // end namespace MLCommon
|
52ee3812a4b20cf1c0ed018a73cb0904dd020303.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_tea_leaf_ppcg_inner2_kernel;
int xdim0_tea_leaf_ppcg_inner2_kernel_h = -1;
__constant__ int xdim1_tea_leaf_ppcg_inner2_kernel;
int xdim1_tea_leaf_ppcg_inner2_kernel_h = -1;
__constant__ int xdim2_tea_leaf_ppcg_inner2_kernel;
int xdim2_tea_leaf_ppcg_inner2_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#define OPS_ACC0(x, y) (x + xdim0_tea_leaf_ppcg_inner2_kernel * (y))
#define OPS_ACC1(x, y) (x + xdim1_tea_leaf_ppcg_inner2_kernel * (y))
#define OPS_ACC2(x, y) (x + xdim2_tea_leaf_ppcg_inner2_kernel * (y))
// user function
__device__
void
tea_leaf_ppcg_inner2_kernel_gpu(double *sd, double *utemp, const double *z,
const double *alpha, const double *beta) {
sd[OPS_ACC0(0, 0)] =
(*alpha) * sd[OPS_ACC0(0, 0)] + (*beta) * z[OPS_ACC2(0, 0)];
utemp[OPS_ACC1(0, 0)] = utemp[OPS_ACC1(0, 0)] + sd[OPS_ACC0(0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
__global__ void ops_tea_leaf_ppcg_inner2_kernel(double *__restrict arg0,
double *__restrict arg1,
const double *__restrict arg2,
const double arg3,
const double arg4, int size0,
int size1) {
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_tea_leaf_ppcg_inner2_kernel;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_tea_leaf_ppcg_inner2_kernel;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_tea_leaf_ppcg_inner2_kernel;
if (idx_x < size0 && idx_y < size1) {
tea_leaf_ppcg_inner2_kernel_gpu(arg0, arg1, arg2, &arg3, &arg4);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_tea_leaf_ppcg_inner2_kernel(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4) {
#else
void ops_par_loop_tea_leaf_ppcg_inner2_kernel_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[5] = {arg0, arg1, arg2, arg3, arg4};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 5, range, 47))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(47, "tea_leaf_ppcg_inner2_kernel");
OPS_kernels[47].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 2; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 2; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
if (xdim0 != xdim0_tea_leaf_ppcg_inner2_kernel_h ||
xdim1 != xdim1_tea_leaf_ppcg_inner2_kernel_h ||
xdim2 != xdim2_tea_leaf_ppcg_inner2_kernel_h) {
hipMemcpyToSymbol(xdim0_tea_leaf_ppcg_inner2_kernel, &xdim0, sizeof(int));
xdim0_tea_leaf_ppcg_inner2_kernel_h = xdim0;
hipMemcpyToSymbol(xdim1_tea_leaf_ppcg_inner2_kernel, &xdim1, sizeof(int));
xdim1_tea_leaf_ppcg_inner2_kernel_h = xdim1;
hipMemcpyToSymbol(xdim2_tea_leaf_ppcg_inner2_kernel, &xdim2, sizeof(int));
xdim2_tea_leaf_ppcg_inner2_kernel_h = xdim2;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
char *p_a[5];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 5);
ops_halo_exchanges(args, 5, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[47].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_tea_leaf_ppcg_inner2_kernel), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2],
*(double *)arg3.data, *(double *)arg4.data, x_size, y_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[47].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 5);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[47].mpi_time += t2 - t1;
OPS_kernels[47].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[47].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[47].transfer += ops_compute_transfer(dim, start, end, &arg2);
}
}
#ifdef OPS_LAZY
void ops_par_loop_tea_leaf_ppcg_inner2_kernel(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 47;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 47;
for (int i = 0; i < 4; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 5;
desc->args = (ops_arg *)malloc(5 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
char *tmp = (char *)malloc(1 * sizeof(double));
memcpy(tmp, arg3.data, 1 * sizeof(double));
desc->args[3].data = tmp;
desc->args[4] = arg4;
tmp = (char *)malloc(1 * sizeof(double));
memcpy(tmp, arg4.data, 1 * sizeof(double));
desc->args[4].data = tmp;
desc->function = ops_par_loop_tea_leaf_ppcg_inner2_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(47, "tea_leaf_ppcg_inner2_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
| 52ee3812a4b20cf1c0ed018a73cb0904dd020303.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_tea_leaf_ppcg_inner2_kernel;
int xdim0_tea_leaf_ppcg_inner2_kernel_h = -1;
__constant__ int xdim1_tea_leaf_ppcg_inner2_kernel;
int xdim1_tea_leaf_ppcg_inner2_kernel_h = -1;
__constant__ int xdim2_tea_leaf_ppcg_inner2_kernel;
int xdim2_tea_leaf_ppcg_inner2_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#define OPS_ACC0(x, y) (x + xdim0_tea_leaf_ppcg_inner2_kernel * (y))
#define OPS_ACC1(x, y) (x + xdim1_tea_leaf_ppcg_inner2_kernel * (y))
#define OPS_ACC2(x, y) (x + xdim2_tea_leaf_ppcg_inner2_kernel * (y))
// user function
__device__
void
tea_leaf_ppcg_inner2_kernel_gpu(double *sd, double *utemp, const double *z,
const double *alpha, const double *beta) {
sd[OPS_ACC0(0, 0)] =
(*alpha) * sd[OPS_ACC0(0, 0)] + (*beta) * z[OPS_ACC2(0, 0)];
utemp[OPS_ACC1(0, 0)] = utemp[OPS_ACC1(0, 0)] + sd[OPS_ACC0(0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
__global__ void ops_tea_leaf_ppcg_inner2_kernel(double *__restrict arg0,
double *__restrict arg1,
const double *__restrict arg2,
const double arg3,
const double arg4, int size0,
int size1) {
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_tea_leaf_ppcg_inner2_kernel;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_tea_leaf_ppcg_inner2_kernel;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_tea_leaf_ppcg_inner2_kernel;
if (idx_x < size0 && idx_y < size1) {
tea_leaf_ppcg_inner2_kernel_gpu(arg0, arg1, arg2, &arg3, &arg4);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_tea_leaf_ppcg_inner2_kernel(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4) {
#else
void ops_par_loop_tea_leaf_ppcg_inner2_kernel_execute(
ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[5] = {arg0, arg1, arg2, arg3, arg4};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 5, range, 47))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(47, "tea_leaf_ppcg_inner2_kernel");
OPS_kernels[47].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 2; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 2; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
if (xdim0 != xdim0_tea_leaf_ppcg_inner2_kernel_h ||
xdim1 != xdim1_tea_leaf_ppcg_inner2_kernel_h ||
xdim2 != xdim2_tea_leaf_ppcg_inner2_kernel_h) {
cudaMemcpyToSymbol(xdim0_tea_leaf_ppcg_inner2_kernel, &xdim0, sizeof(int));
xdim0_tea_leaf_ppcg_inner2_kernel_h = xdim0;
cudaMemcpyToSymbol(xdim1_tea_leaf_ppcg_inner2_kernel, &xdim1, sizeof(int));
xdim1_tea_leaf_ppcg_inner2_kernel_h = xdim1;
cudaMemcpyToSymbol(xdim2_tea_leaf_ppcg_inner2_kernel, &xdim2, sizeof(int));
xdim2_tea_leaf_ppcg_inner2_kernel_h = xdim2;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
char *p_a[5];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 5);
ops_halo_exchanges(args, 5, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[47].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_tea_leaf_ppcg_inner2_kernel<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2],
*(double *)arg3.data, *(double *)arg4.data, x_size, y_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[47].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 5);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[47].mpi_time += t2 - t1;
OPS_kernels[47].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[47].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[47].transfer += ops_compute_transfer(dim, start, end, &arg2);
}
}
#ifdef OPS_LAZY
void ops_par_loop_tea_leaf_ppcg_inner2_kernel(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 47;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 47;
for (int i = 0; i < 4; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 5;
desc->args = (ops_arg *)malloc(5 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
char *tmp = (char *)malloc(1 * sizeof(double));
memcpy(tmp, arg3.data, 1 * sizeof(double));
desc->args[3].data = tmp;
desc->args[4] = arg4;
tmp = (char *)malloc(1 * sizeof(double));
memcpy(tmp, arg4.data, 1 * sizeof(double));
desc->args[4].data = tmp;
desc->function = ops_par_loop_tea_leaf_ppcg_inner2_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(47, "tea_leaf_ppcg_inner2_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
f3a0bfaf8dc132d56e0c13ee8802892494440811.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathMagma.hip"
#else
#include <c10/hip/HIPException.h>
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
#ifdef USE_MAGMA
static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, scalar_t *src, int k)
{
int64_t size[1] = { k };
int64_t stride[1] = { 1 };
THCTensor_(resizeNd)(state, self, 1, size, stride);
size_t len = k * sizeof(scalar_t);
auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA();
THCudaCheck(hipMemcpyAsync(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, hipMemcpyHostToDevice, stream));
AT_CUDA_CHECK(hipStreamSynchronize(stream));
}
static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, scalar_t *src, int m, int n)
{
int64_t size[2] = { m, n };
int64_t stride[2] = { 1, m };
THCTensor_(resizeNd)(state, self, 2, size, stride);
size_t len = m * n * sizeof(scalar_t);
auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA();
THCudaCheck(hipMemcpyAsync(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, hipMemcpyHostToDevice, stream));
AT_CUDA_CHECK(hipStreamSynchronize(stream));
}
static void THCTensor_(copyTensor2d)(THCState *state, scalar_t *dst, THCTensor *self)
{
THAssert(self->dim() == 2);
size_t len = THCTensor_(nElement)(state, self)*sizeof(scalar_t);
THCTensor *temp = THCTensor_(newTranspose)(state, self, 0, 1);
THCTensor *selfc = THCTensor_(newContiguous)(state, temp);
auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA();
THCudaCheck(hipMemcpyAsync(dst, THCStorage_(data)(state, THTensor_getStoragePtr(selfc)) + selfc->storage_offset(), len, hipMemcpyDeviceToHost, stream));
AT_CUDA_CHECK(hipStreamSynchronize(stream));
THCTensor_(free)(state, temp);
THCTensor_(free)(state, selfc);
}
#endif // USE_MAGMA
static THCTensor* THCTensor_(newColumnMajor)(THCState *state, THCTensor *self, THCTensor *src)
{
THAssert(src->dim() == 2);
if (self == src && self->stride(0) == 1 && self->stride(1) == self->size(0))
{
THCTensor_(retain)(state, self);
return self;
}
if (self == src)
self = THCTensor_(new)(state);
else
THCTensor_(retain)(state, self);
int64_t size[2] = { src->size(0), src->size(1) };
int64_t stride[2] = { 1, src->size(0) };
THCTensor_(resizeNd)(state, self, 2, size, stride);
THCTensor_(copy)(state, self, src);
return self;
}
void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional");
THArgCheck(!b_->is_empty() && b_->dim() == 2, 1, "b should be (non-empty) 2 dimensional");
TORCH_CHECK(a_->size(0) == b_->size(0), "Expected A and b to have same size "
"at dim 0, but A has ", a_->size(0), " rows and B has ", b_->size(0), " rows");
THArgCheck(a_->size(0) >= a_->size(1), 2, "Expected A with shape (m x n) to have "
"m >= n. The case for m < n is not implemented yet.");
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_);
scalar_t *a_data = THCTensor_(data)(state, a);
scalar_t *b_data = THCTensor_(data)(state, b);
int64_t m = a->size(0);
int64_t n = a->size(1);
int64_t nrhs = b->size(1);
scalar_t wkopt;
int info;
{
at::native::MagmaStreamSyncGuard guard;
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#endif
scalar_t *hwork = th_magma_malloc_pinned<scalar_t>((size_t)wkopt);
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#endif
magma_free_pinned(hwork);
}
if (info != 0)
THError("MAGMA gels : Argument %d : illegal value", -info);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(freeCopyTo)(state, b, rb_);
#else
THError(NoMagma(gels));
#endif
}
#endif
#endif
| f3a0bfaf8dc132d56e0c13ee8802892494440811.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathMagma.cu"
#else
#include <c10/cuda/CUDAException.h>
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
#ifdef USE_MAGMA
static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, scalar_t *src, int k)
{
int64_t size[1] = { k };
int64_t stride[1] = { 1 };
THCTensor_(resizeNd)(state, self, 1, size, stride);
size_t len = k * sizeof(scalar_t);
auto stream = c10::cuda::getCurrentCUDAStream();
THCudaCheck(cudaMemcpyAsync(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, cudaMemcpyHostToDevice, stream));
AT_CUDA_CHECK(cudaStreamSynchronize(stream));
}
static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, scalar_t *src, int m, int n)
{
int64_t size[2] = { m, n };
int64_t stride[2] = { 1, m };
THCTensor_(resizeNd)(state, self, 2, size, stride);
size_t len = m * n * sizeof(scalar_t);
auto stream = c10::cuda::getCurrentCUDAStream();
THCudaCheck(cudaMemcpyAsync(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, cudaMemcpyHostToDevice, stream));
AT_CUDA_CHECK(cudaStreamSynchronize(stream));
}
static void THCTensor_(copyTensor2d)(THCState *state, scalar_t *dst, THCTensor *self)
{
THAssert(self->dim() == 2);
size_t len = THCTensor_(nElement)(state, self)*sizeof(scalar_t);
THCTensor *temp = THCTensor_(newTranspose)(state, self, 0, 1);
THCTensor *selfc = THCTensor_(newContiguous)(state, temp);
auto stream = c10::cuda::getCurrentCUDAStream();
THCudaCheck(cudaMemcpyAsync(dst, THCStorage_(data)(state, THTensor_getStoragePtr(selfc)) + selfc->storage_offset(), len, cudaMemcpyDeviceToHost, stream));
AT_CUDA_CHECK(cudaStreamSynchronize(stream));
THCTensor_(free)(state, temp);
THCTensor_(free)(state, selfc);
}
#endif // USE_MAGMA
static THCTensor* THCTensor_(newColumnMajor)(THCState *state, THCTensor *self, THCTensor *src)
{
THAssert(src->dim() == 2);
if (self == src && self->stride(0) == 1 && self->stride(1) == self->size(0))
{
THCTensor_(retain)(state, self);
return self;
}
if (self == src)
self = THCTensor_(new)(state);
else
THCTensor_(retain)(state, self);
int64_t size[2] = { src->size(0), src->size(1) };
int64_t stride[2] = { 1, src->size(0) };
THCTensor_(resizeNd)(state, self, 2, size, stride);
THCTensor_(copy)(state, self, src);
return self;
}
void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional");
THArgCheck(!b_->is_empty() && b_->dim() == 2, 1, "b should be (non-empty) 2 dimensional");
TORCH_CHECK(a_->size(0) == b_->size(0), "Expected A and b to have same size "
"at dim 0, but A has ", a_->size(0), " rows and B has ", b_->size(0), " rows");
THArgCheck(a_->size(0) >= a_->size(1), 2, "Expected A with shape (m x n) to have "
"m >= n. The case for m < n is not implemented yet.");
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_);
scalar_t *a_data = THCTensor_(data)(state, a);
scalar_t *b_data = THCTensor_(data)(state, b);
int64_t m = a->size(0);
int64_t n = a->size(1);
int64_t nrhs = b->size(1);
scalar_t wkopt;
int info;
{
at::native::MagmaStreamSyncGuard guard;
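// Standard LAPACK/MAGMA workspace-query idiom: the first gels call passes
// lwork = -1 so the routine only reports the optimal workspace size in wkopt;
// the actual solve happens in the second call using the pinned host buffer.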
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#endif
scalar_t *hwork = th_magma_malloc_pinned<scalar_t>((size_t)wkopt);
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#endif
magma_free_pinned(hwork);
}
if (info != 0)
THError("MAGMA gels : Argument %d : illegal value", -info);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(freeCopyTo)(state, b, rb_);
#else
THError(NoMagma(gels));
#endif
}
#endif
#endif
|
c28472d933a59ea2ef657d21cc0633203edc76de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/ziteric.cu, normal z -> d, Tue Aug 30 09:38:46 2016
*/
#include "magmasparse_internal.h"
#define PRECISION_d
__global__ void
magma_diteric_csr_kernel(
magma_int_t n,
magma_int_t nnz,
magma_index_t *Arowidx,
magma_index_t *Acolidx,
const double * __restrict__ A_val,
magma_index_t *rowptr,
magma_index_t *colidx,
double *val )
{
int i, j;
int k = (blockDim.x * blockIdx.x + threadIdx.x); // % nnz;
double zero = MAGMA_D_MAKE(0.0, 0.0);
double s, sp;
int il, iu, jl, ju;
if ( k < nnz )
{
i = Arowidx[k];
j = Acolidx[k];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
s = __ldg( A_val+k );
#else
s = A_val[k];
#endif
il = rowptr[i];
iu = rowptr[j];
while (il < rowptr[i+1] && iu < rowptr[j+1])
{
sp = zero;
jl = colidx[il];
ju = colidx[iu];
if (jl < ju)
il++;
else if (ju < jl)
iu++;
else
{
// we are going to modify this u entry
sp = val[il] * val[iu];
s -= sp;
il++;
iu++;
}
}
// undo the last operation (it must be the last)
s += sp;
__syncthreads();
// modify entry
if (i == j)
val[il-1] = MAGMA_D_MAKE( sqrt( fabs( MAGMA_D_REAL(s) )), 0.0 );
else
val[il-1] = s / val[iu-1];
}
}// kernel
/**
Purpose
-------
This routine iteratively computes an incomplete Cholesky factorization.
The idea is according to Edmond Chow's presentation at SIAM 2014.
This routine was used in the ISC 2015 paper:
E. Chow et al.: 'Study of an Asynchronous Iterative Algorithm
for Computing Incomplete Factorizations on GPUs'
The input format of the initial guess matrix A is Magma_CSRCOO,
A_CSR is CSR or CSRCOO format.
Arguments
---------
@param[in]
A magma_d_matrix
input matrix A - initial guess (lower triangular)
@param[in,out]
A_CSR magma_d_matrix
input/output matrix containing the IC approximation
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_diteric_csr(
magma_d_matrix A,
magma_d_matrix A_CSR,
magma_queue_t queue )
{
int blocksize1 = 128;
int blocksize2 = 1;
int dimgrid1 = magma_ceildiv( A.nnz, blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
// Runtime API
// hipFuncCachePreferShared: shared memory is 48 KB
// hipFuncCachePreferEqual: shared memory is 32 KB
// hipFuncCachePreferL1: shared memory is 16 KB
// hipFuncCachePreferNone: no preference
//hipFuncSetCacheConfig(hipFuncCachePreferShared);
hipDeviceSetCacheConfig( hipFuncCachePreferL1 );
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
hipLaunchKernelGGL(( magma_diteric_csr_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
A.num_rows, A.nnz,
A.rowidx, A.col, A.val,
A_CSR.row, A_CSR.col, A_CSR.val );
return MAGMA_SUCCESS;
}
| c28472d933a59ea2ef657d21cc0633203edc76de.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/ziteric.cu, normal z -> d, Tue Aug 30 09:38:46 2016
*/
#include "magmasparse_internal.h"
#define PRECISION_d
__global__ void
magma_diteric_csr_kernel(
magma_int_t n,
magma_int_t nnz,
magma_index_t *Arowidx,
magma_index_t *Acolidx,
const double * __restrict__ A_val,
magma_index_t *rowptr,
magma_index_t *colidx,
double *val )
{
int i, j;
int k = (blockDim.x * blockIdx.x + threadIdx.x); // % nnz;
double zero = MAGMA_D_MAKE(0.0, 0.0);
double s, sp;
int il, iu, jl, ju;
if ( k < nnz )
{
i = Arowidx[k];
j = Acolidx[k];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
s = __ldg( A_val+k );
#else
s = A_val[k];
#endif
il = rowptr[i];
iu = rowptr[j];
while (il < rowptr[i+1] && iu < rowptr[j+1])
{
sp = zero;
jl = colidx[il];
ju = colidx[iu];
if (jl < ju)
il++;
else if (ju < jl)
iu++;
else
{
// we are going to modify this u entry
sp = val[il] * val[iu];
s -= sp;
il++;
iu++;
}
}
// undo the last operation (it must be the last)
s += sp;
__syncthreads();
// modify entry
if (i == j)
val[il-1] = MAGMA_D_MAKE( sqrt( fabs( MAGMA_D_REAL(s) )), 0.0 );
else
val[il-1] = s / val[iu-1];
}
}// kernel
/**
Purpose
-------
This routine iteratively computes an incomplete Cholesky factorization.
The idea is according to Edmond Chow's presentation at SIAM 2014.
This routine was used in the ISC 2015 paper:
E. Chow et al.: 'Study of an Asynchronous Iterative Algorithm
for Computing Incomplete Factorizations on GPUs'
The input format of the initial guess matrix A is Magma_CSRCOO,
A_CSR is CSR or CSRCOO format.
Arguments
---------
@param[in]
A magma_d_matrix
input matrix A - initial guess (lower triangular)
@param[in,out]
A_CSR magma_d_matrix
input/output matrix containing the IC approximation
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_diteric_csr(
magma_d_matrix A,
magma_d_matrix A_CSR,
magma_queue_t queue )
{
int blocksize1 = 128;
int blocksize2 = 1;
int dimgrid1 = magma_ceildiv( A.nnz, blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
// Runtime API
// cudaFuncCachePreferShared: shared memory is 48 KB
// cudaFuncCachePreferEqual: shared memory is 32 KB
// cudaFuncCachePreferL1: shared memory is 16 KB
// cudaFuncCachePreferNone: no preference
//cudaFuncSetCacheConfig(cudaFuncCachePreferShared);
cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
magma_diteric_csr_kernel<<< grid, block, 0, queue->cuda_stream() >>>
( A.num_rows, A.nnz,
A.rowidx, A.col, A.val,
A_CSR.row, A_CSR.col, A_CSR.val );
return MAGMA_SUCCESS;
}
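/*
    Minimal driver sketch (hypothetical helper, not part of MAGMA's API): the
    routine above performs a single asynchronous sweep, so the fixed-point
    iteration described in the documentation is obtained by calling it
    repeatedly on the same factor.
*/
extern "C" magma_int_t
magma_diteric_csr_sweeps(
    magma_d_matrix A,
    magma_d_matrix A_CSR,
    magma_int_t sweeps,
    magma_queue_t queue )
{
    for (magma_int_t s = 0; s < sweeps; ++s) {
        magma_diteric_csr( A, A_CSR, queue );  // A_CSR.val is updated in place
    }
    return MAGMA_SUCCESS;
}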
|
78ef955b693710f75521d765be716cfde2b510bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHTensorMath.h"
#include "THHGeneral.h"
#include "THHBlas.h"
#include "THHTensorCopy.h"
#include "THHApply.cuh"
#include "THHReduce.cuh"
#include <thrust/functional.h>
/* Perform an inclusive scan along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
* - row_size is the size of the dimension along which to scan;
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
* Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
template<class BinaryOp>
__global__ void THCudaTensor_kernel_scanOuterDim(float *tgt_, float *src_,
unsigned num_orows, unsigned num_irows, unsigned row_size,
float init, BinaryOp binary_op)
{
for (unsigned orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (unsigned irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
float *src = src_ + orow * row_size * num_irows + irow;
float *tgt = tgt_ + orow * row_size * num_irows + irow;
float acc = init;
for (unsigned col = 0; col < row_size; ++col) {
acc = binary_op(acc, *src);
*tgt = acc;
src += num_irows;
tgt += num_irows;
}
}
}
}
template<class BinaryOp>
__host__ void THCudaTensor_scanOuterDim(THCState *state, THCudaTensor *tgt, THCudaTensor *src, long dimension,
float init, BinaryOp binary_op)
{
unsigned ndim = THCudaTensor_nDimension(state, src);
// Treat all outer dimensions (i.e. dim < dimension) as one.
unsigned num_orows = 1;
for (long dim = 0; dim < dimension; dim++) {
num_orows *= THCudaTensor_size(state, src, dim);
}
unsigned row_size = THCudaTensor_size(state, src, dimension);
// Treat all inner dimensions (i.e. dim > dimension) as one.
unsigned num_irows = 1;
for (unsigned dim = dimension + 1; dim < ndim; dim++) {
num_irows *= THCudaTensor_size(state, src, dim);
}
dim3 threads(min(512, num_irows));
unsigned maxGridDim = 1024;
dim3 grid(min(maxGridDim, num_orows), min(maxGridDim, THCCeilDiv(num_irows, threads.x)));
hipLaunchKernelGGL(( THCudaTensor_kernel_scanOuterDim), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCudaTensor_data(state, tgt), THCudaTensor_data(state, src), num_orows, num_irows, row_size, init, binary_op);
hipError_t errcode = hipGetLastError();
if (errcode != hipSuccess) {
THError(hipGetErrorString(errcode));
}
}
/* Perform an inclusive scan along the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
template<int num_threads_x, int num_threads_y, class BinaryFunction>
__global__ void THCudaTensor_kernel_scanInnermostDim(float *tgt_, float *src_,
unsigned num_rows, unsigned row_size,
float init, BinaryFunction binary_op)
{
__shared__ float sbuf[num_threads_y][2 * num_threads_x];
float* row_buf = sbuf[threadIdx.y];
for (unsigned block_row = blockIdx.x * blockDim.y;
block_row < num_rows;
block_row += blockDim.y * gridDim.x) {
unsigned row = block_row + threadIdx.y;
float block_total = init;
float *row_src = src_ + row * row_size;
float *row_tgt = tgt_ + row * row_size;
// Perform scan on one block at a time, keeping track of the total value of
// all blocks processed so far.
for (unsigned block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) {
// Load data into shared memory (two values per thread).
unsigned col1 = block_col + threadIdx.x;
unsigned col2 = block_col + num_threads_x + threadIdx.x;
if (row < num_rows) {
if (col1 < row_size) {
row_buf[threadIdx.x] = row_src[col1];
} else {
row_buf[threadIdx.x] = init;
}
if (col2 < row_size) {
row_buf[num_threads_x + threadIdx.x] = row_src[col2];
} else {
row_buf[num_threads_x + threadIdx.x] = init;
}
// Add the total value of all previous blocks to the first value of this block.
if (threadIdx.x == 0) {
row_buf[0] = binary_op(row_buf[0], block_total);
}
}
__syncthreads();
// Parallel reduction (up-sweep).
for (unsigned s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) {
if (row < num_rows && threadIdx.x < s) {
unsigned offset = (2 * threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Down-sweep.
for (unsigned s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) {
if (row < num_rows && threadIdx.x < s - 1) {
unsigned offset = 2 * (threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Write back to output.
if (row < num_rows) {
if (col1 < row_size) row_tgt[col1] = row_buf[threadIdx.x];
if (col2 < row_size) row_tgt[col2] = row_buf[num_threads_x + threadIdx.x];
}
block_total = row_buf[2 * num_threads_x - 1];
__syncthreads();
}
}
}
template<class BinaryFunction>
__host__ void THCudaTensor_scanInnermostDim(THCState *state, THCudaTensor *tgt, THCudaTensor *src, float init, BinaryFunction binary_op)
{
unsigned ndim = THCudaTensor_nDimension(state, src);
// Treat all outer dimensions as a single dimension.
unsigned num_rows = 1;
for (unsigned dim = 0; dim < ndim - 1; dim++) {
num_rows *= THCudaTensor_size(state, src, dim);
}
unsigned row_size = THCudaTensor_size(state, src, ndim - 1);
dim3 threads(16, 32);
dim3 grid(min(1024, THCCeilDiv(num_rows, threads.y)));
hipLaunchKernelGGL(( THCudaTensor_kernel_scanInnermostDim<16, 32>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCudaTensor_data(state, tgt), THCudaTensor_data(state, src), num_rows, row_size, init, binary_op);
hipError_t errcode = hipGetLastError();
if (errcode != hipSuccess) {
THError(hipGetErrorString(errcode));
}
}
template<class BinaryFunction>
void THCudaTensor_scanDim(THCState *state, THCudaTensor *self_, THCudaTensor *src, long dimension, float init, BinaryFunction binary_op)
{
THCudaTensor_resizeAs(state, self_, src);
THCudaTensor *self = THCudaTensor_newContiguous(state, self_);
src = THCudaTensor_newContiguous(state, src);
if (dimension == THCudaTensor_nDimension(state, src) - 1) {
THCudaTensor_scanInnermostDim(state, self, src, init, binary_op);
} else {
THCudaTensor_scanOuterDim(state, self, src, dimension, init, binary_op);
}
THCudaTensor_free(state, src);
THCudaTensor_freeCopyTo(state, self, self_);
}
void THCudaTensor_cumsum(THCState *state, THCudaTensor *self, THCudaTensor *src, long dimension)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
return THCudaTensor_scanDim(state, self, src, dimension, 0.0f, thrust::plus<float>());
}
void THCudaTensor_cumprod(THCState *state, THCudaTensor *self, THCudaTensor *src, long dimension)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
return THCudaTensor_scanDim(state, self, src, dimension, 1.0f, thrust::multiplies<float>());
}
| 78ef955b693710f75521d765be716cfde2b510bd.cu | #include "THCTensorMath.h"
#include "THCGeneral.h"
#include "THCBlas.h"
#include "THCTensorCopy.h"
#include "THCApply.cuh"
#include "THCReduce.cuh"
#include <thrust/functional.h>
/* Perform an inclusive scan along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
* - row_size is the size of the dimension along which to scan;
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
* Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
template<class BinaryOp>
__global__ void THCudaTensor_kernel_scanOuterDim(float *tgt_, float *src_,
unsigned num_orows, unsigned num_irows, unsigned row_size,
float init, BinaryOp binary_op)
{
for (unsigned orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (unsigned irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
float *src = src_ + orow * row_size * num_irows + irow;
float *tgt = tgt_ + orow * row_size * num_irows + irow;
float acc = init;
for (unsigned col = 0; col < row_size; ++col) {
acc = binary_op(acc, *src);
*tgt = acc;
src += num_irows;
tgt += num_irows;
}
}
}
}
template<class BinaryOp>
__host__ void THCudaTensor_scanOuterDim(THCState *state, THCudaTensor *tgt, THCudaTensor *src, long dimension,
float init, BinaryOp binary_op)
{
unsigned ndim = THCudaTensor_nDimension(state, src);
// Treat all outer dimensions (i.e. dim < dimension) as one.
unsigned num_orows = 1;
for (long dim = 0; dim < dimension; dim++) {
num_orows *= THCudaTensor_size(state, src, dim);
}
unsigned row_size = THCudaTensor_size(state, src, dimension);
// Treat all inner dimensions (i.e. dim > dimension) as one.
unsigned num_irows = 1;
for (unsigned dim = dimension + 1; dim < ndim; dim++) {
num_irows *= THCudaTensor_size(state, src, dim);
}
dim3 threads(min(512, num_irows));
unsigned maxGridDim = 1024;
dim3 grid(min(maxGridDim, num_orows), min(maxGridDim, THCCeilDiv(num_irows, threads.x)));
THCudaTensor_kernel_scanOuterDim<<<grid, threads, 0, THCState_getCurrentStream(state)>>>(
THCudaTensor_data(state, tgt), THCudaTensor_data(state, src), num_orows, num_irows, row_size, init, binary_op);
cudaError errcode = cudaGetLastError();
if (errcode != cudaSuccess) {
THError(cudaGetErrorString(errcode));
}
}
/* Perform an inclusive scan along the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
template<int num_threads_x, int num_threads_y, class BinaryFunction>
__global__ void THCudaTensor_kernel_scanInnermostDim(float *tgt_, float *src_,
unsigned num_rows, unsigned row_size,
float init, BinaryFunction binary_op)
{
__shared__ float sbuf[num_threads_y][2 * num_threads_x];
float* row_buf = sbuf[threadIdx.y];
for (unsigned block_row = blockIdx.x * blockDim.y;
block_row < num_rows;
block_row += blockDim.y * gridDim.x) {
unsigned row = block_row + threadIdx.y;
float block_total = init;
float *row_src = src_ + row * row_size;
float *row_tgt = tgt_ + row * row_size;
// Perform scan on one block at a time, keeping track of the total value of
// all blocks processed so far.
for (unsigned block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) {
// Load data into shared memory (two values per thread).
unsigned col1 = block_col + threadIdx.x;
unsigned col2 = block_col + num_threads_x + threadIdx.x;
if (row < num_rows) {
if (col1 < row_size) {
row_buf[threadIdx.x] = row_src[col1];
} else {
row_buf[threadIdx.x] = init;
}
if (col2 < row_size) {
row_buf[num_threads_x + threadIdx.x] = row_src[col2];
} else {
row_buf[num_threads_x + threadIdx.x] = init;
}
// Add the total value of all previous blocks to the first value of this block.
if (threadIdx.x == 0) {
row_buf[0] = binary_op(row_buf[0], block_total);
}
}
__syncthreads();
// Parallel reduction (up-sweep).
for (unsigned s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) {
if (row < num_rows && threadIdx.x < s) {
unsigned offset = (2 * threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Down-sweep.
for (unsigned s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) {
if (row < num_rows && threadIdx.x < s - 1) {
unsigned offset = 2 * (threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Write back to output.
if (row < num_rows) {
if (col1 < row_size) row_tgt[col1] = row_buf[threadIdx.x];
if (col2 < row_size) row_tgt[col2] = row_buf[num_threads_x + threadIdx.x];
}
block_total = row_buf[2 * num_threads_x - 1];
__syncthreads();
}
}
}
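/* Illustrative walk-through of the blocking above: with num_threads_x = 16 a
 * row is consumed in chunks of 32 columns, so row_size = 100 takes four
 * chunks (the tail is padded with init). After each chunk, block_total holds
 * the inclusive prefix of everything scanned so far and is folded into
 * element 0 of the next chunk.
 */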
template<class BinaryFunction>
__host__ void THCudaTensor_scanInnermostDim(THCState *state, THCudaTensor *tgt, THCudaTensor *src, float init, BinaryFunction binary_op)
{
unsigned ndim = THCudaTensor_nDimension(state, src);
// Treat all outer dimensions as a single dimension.
unsigned num_rows = 1;
for (unsigned dim = 0; dim < ndim - 1; dim++) {
num_rows *= THCudaTensor_size(state, src, dim);
}
unsigned row_size = THCudaTensor_size(state, src, ndim - 1);
dim3 threads(16, 32);
dim3 grid(min(1024, THCCeilDiv(num_rows, threads.y)));
THCudaTensor_kernel_scanInnermostDim<16, 32><<<grid, threads, 0, THCState_getCurrentStream(state)>>>(
THCudaTensor_data(state, tgt), THCudaTensor_data(state, src), num_rows, row_size, init, binary_op);
cudaError errcode = cudaGetLastError();
if (errcode != cudaSuccess) {
THError(cudaGetErrorString(errcode));
}
}
template<class BinaryFunction>
void THCudaTensor_scanDim(THCState *state, THCudaTensor *self_, THCudaTensor *src, long dimension, float init, BinaryFunction binary_op)
{
THCudaTensor_resizeAs(state, self_, src);
THCudaTensor *self = THCudaTensor_newContiguous(state, self_);
src = THCudaTensor_newContiguous(state, src);
if (dimension == THCudaTensor_nDimension(state, src) - 1) {
THCudaTensor_scanInnermostDim(state, self, src, init, binary_op);
} else {
THCudaTensor_scanOuterDim(state, self, src, dimension, init, binary_op);
}
THCudaTensor_free(state, src);
THCudaTensor_freeCopyTo(state, self, self_);
}
void THCudaTensor_cumsum(THCState *state, THCudaTensor *self, THCudaTensor *src, long dimension)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
return THCudaTensor_scanDim(state, self, src, dimension, 0.0f, thrust::plus<float>());
}
void THCudaTensor_cumprod(THCState *state, THCudaTensor *self, THCudaTensor *src, long dimension)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
return THCudaTensor_scanDim(state, self, src, dimension, 1.0f, thrust::multiplies<float>());
}
|
e3ead60641202fc2715569a17fcdb84241606239.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/TensorAccessor.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THHGeneral.h>
#include <utility>
#include "cub_utils.cuh"
template <typename ScalarT, template<typename> typename PtrTraits, typename IndexT>
struct ScopesToOffsetIterator : std::iterator<std::random_access_iterator_tag, ScalarT> {
at::PackedTensorAccessor<ScalarT, 2, PtrTraits, IndexT> scopes;
bool is_end;
ScopesToOffsetIterator(at::PackedTensorAccessor<ScalarT, 2, PtrTraits, IndexT> const &scopes, bool is_end)
: scopes(scopes), is_end(is_end) {}
__host__ __device__ __forceinline__ ScalarT operator[](IndexT idx) const {
auto result = scopes[idx][0];
if (is_end) {
result += scopes[idx][1];
}
return result;
}
};
template <typename ScalarT, template <typename> typename PtrTraits, typename IndexT>
ScopesToOffsetIterator<ScalarT, PtrTraits, IndexT> make_scopes_to_offset_iterator(
at::PackedTensorAccessor<ScalarT, 2, PtrTraits, IndexT> const &scopes, bool is_end) {
return ScopesToOffsetIterator<ScalarT, PtrTraits, IndexT>(scopes, is_end);
}
std::tuple<at::Tensor, at::Tensor> segment_argmax_gpu(at::Tensor const &values,
at::Tensor const &scopes) {
auto result_values = at::empty({scopes.size(0)}, values.options());
auto result_locations =
at::empty({scopes.size(0)}, scopes.options().dtype(c10::ScalarType::Int));
auto scopes_accessor =
scopes.packed_accessor<std::int64_t, 2, at::DefaultPtrTraits, std::uint32_t>();
auto offsets_start = make_scopes_to_offset_iterator(scopes_accessor, false);
auto offsets_end = make_scopes_to_offset_iterator(scopes_accessor, true);
AT_DISPATCH_FLOATING_TYPES(values.scalar_type(), "segment_argmax_gpu", [&]() {
segment_argmax_gpu_impl<scalar_t>(
values.data<scalar_t>(), offsets_start, offsets_end,
result_values.data<scalar_t>(), result_locations.data<std::int32_t>(),
scopes.size(0));
});
return std::make_tuple(result_values, result_locations.toType(scopes.scalar_type()));
}
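// Numerically stable log(exp(a) + exp(b)): order the operands so the larger one is
// factored out, then add log1p(exp(smaller - larger)).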
struct LogAddExp {
template <typename T> __host__ __device__ __forceinline__ T operator()(T a, T b) {
if (a < b) {
T c(a);
a = b;
b = c;
}
return log1p(exp(b - a)) + a;
}
};
template <typename T, typename Accessor>
void segment_logsumexp_gpu_impl(T *values, Accessor const &scopes, T *out_values) {
void *temp_storage = nullptr;
size_t temp_storage_bytes = 0;
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto offsets_start = make_scopes_to_offset_iterator(scopes, false);
auto offsets_end = make_scopes_to_offset_iterator(scopes, true);
auto init_value = std::numeric_limits<T>::lowest();
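  // First call with a null temp_storage pointer only queries the scratch size;
  // the allocation and the actual segmented reduction follow below.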
THCudaCheck(hipcub::DeviceSegmentedReduce::Reduce(
temp_storage, temp_storage_bytes, values, out_values,
scopes.size(0), offsets_start, offsets_end, LogAddExp{}, init_value, stream));
THCudaCheck(hipMalloc(&temp_storage, temp_storage_bytes));
THCudaCheck(hipcub::DeviceSegmentedReduce::Reduce(
temp_storage, temp_storage_bytes, values, out_values,
scopes.size(0), offsets_start, offsets_end, LogAddExp{}, init_value, stream));
THCudaCheck(hipFree(temp_storage));
}
at::Tensor segment_logsumexp_gpu(at::Tensor const &values, at::Tensor const &scopes) {
auto result = at::empty({scopes.size(0)}, values.options());
AT_DISPATCH_FLOATING_TYPES(values.scalar_type(), "segment_logsumexp_gpu", [&]() {
segment_logsumexp_gpu_impl<scalar_t>(
values.data<scalar_t>(),
scopes.packed_accessor<std::int64_t, 2, at::RestrictPtrTraits, std::uint32_t>(),
result.data<scalar_t>());
});
return result;
}
| e3ead60641202fc2715569a17fcdb84241606239.cu | #include <ATen/ATen.h>
#include <ATen/TensorAccessor.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCGeneral.h>
#include <utility>
#include "cub_utils.cuh"
template <typename ScalarT, template<typename> typename PtrTraits, typename IndexT>
struct ScopesToOffsetIterator : std::iterator<std::random_access_iterator_tag, ScalarT> {
at::PackedTensorAccessor<ScalarT, 2, PtrTraits, IndexT> scopes;
bool is_end;
ScopesToOffsetIterator(at::PackedTensorAccessor<ScalarT, 2, PtrTraits, IndexT> const &scopes, bool is_end)
: scopes(scopes), is_end(is_end) {}
__host__ __device__ __forceinline__ ScalarT operator[](IndexT idx) const {
auto result = scopes[idx][0];
if (is_end) {
result += scopes[idx][1];
}
return result;
}
};
template <typename ScalarT, template <typename> typename PtrTraits, typename IndexT>
ScopesToOffsetIterator<ScalarT, PtrTraits, IndexT> make_scopes_to_offset_iterator(
at::PackedTensorAccessor<ScalarT, 2, PtrTraits, IndexT> const &scopes, bool is_end) {
return ScopesToOffsetIterator<ScalarT, PtrTraits, IndexT>(scopes, is_end);
}
std::tuple<at::Tensor, at::Tensor> segment_argmax_gpu(at::Tensor const &values,
at::Tensor const &scopes) {
auto result_values = at::empty({scopes.size(0)}, values.options());
auto result_locations =
at::empty({scopes.size(0)}, scopes.options().dtype(c10::ScalarType::Int));
auto scopes_accessor =
scopes.packed_accessor<std::int64_t, 2, at::DefaultPtrTraits, std::uint32_t>();
auto offsets_start = make_scopes_to_offset_iterator(scopes_accessor, false);
auto offsets_end = make_scopes_to_offset_iterator(scopes_accessor, true);
AT_DISPATCH_FLOATING_TYPES(values.scalar_type(), "segment_argmax_gpu", [&]() {
segment_argmax_gpu_impl<scalar_t>(
values.data<scalar_t>(), offsets_start, offsets_end,
result_values.data<scalar_t>(), result_locations.data<std::int32_t>(),
scopes.size(0));
});
return std::make_tuple(result_values, result_locations.toType(scopes.scalar_type()));
}
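// Numerically stable log(exp(a) + exp(b)): order the operands so the larger one is
// factored out, then add log1p(exp(smaller - larger)).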
struct LogAddExp {
template <typename T> __host__ __device__ __forceinline__ T operator()(T a, T b) {
if (a < b) {
T c(a);
a = b;
b = c;
}
return log1p(exp(b - a)) + a;
}
};
template <typename T, typename Accessor>
void segment_logsumexp_gpu_impl(T *values, Accessor const &scopes, T *out_values) {
void *temp_storage = nullptr;
size_t temp_storage_bytes = 0;
auto stream = at::cuda::getCurrentCUDAStream();
auto offsets_start = make_scopes_to_offset_iterator(scopes, false);
auto offsets_end = make_scopes_to_offset_iterator(scopes, true);
auto init_value = std::numeric_limits<T>::lowest();
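  // First call with a null temp_storage pointer only queries the scratch size;
  // the allocation and the actual segmented reduction follow below.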
THCudaCheck(cub::DeviceSegmentedReduce::Reduce(
temp_storage, temp_storage_bytes, values, out_values,
scopes.size(0), offsets_start, offsets_end, LogAddExp{}, init_value, stream));
THCudaCheck(cudaMalloc(&temp_storage, temp_storage_bytes));
THCudaCheck(cub::DeviceSegmentedReduce::Reduce(
temp_storage, temp_storage_bytes, values, out_values,
scopes.size(0), offsets_start, offsets_end, LogAddExp{}, init_value, stream));
THCudaCheck(cudaFree(temp_storage));
}
at::Tensor segment_logsumexp_gpu(at::Tensor const &values, at::Tensor const &scopes) {
auto result = at::empty({scopes.size(0)}, values.options());
AT_DISPATCH_FLOATING_TYPES(values.scalar_type(), "segment_logsumexp_gpu", [&]() {
segment_logsumexp_gpu_impl<scalar_t>(
values.data<scalar_t>(),
scopes.packed_accessor<std::int64_t, 2, at::RestrictPtrTraits, std::uint32_t>(),
result.data<scalar_t>());
});
return result;
}
|
5484e87362e08c27fe4f8e1d36fb4e8452689b10.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2014. The Regents of the University of California.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2014 Martin Uecker <[email protected]>
*/
#include <complex.h>
#include <assert.h>
#include <stdbool.h>
#include <hip/hip_runtime.h>
#include <hip/hip_complex.h>
#include "misc/misc.h"
#include "wl3-cuda.h"
#ifndef CFL_SIZE
#define CFL_SIZE sizeof(_Complex float)
#endif
__device__ long Wdot(dim3 a, dim3 b)
{
return a.x * b.x + a.y * b.y + a.z * b.z;
}
__device__ dim3 Wpmuladd(dim3 a, dim3 b, dim3 c)
{
dim3 r(a.x * b.x + c.x, a.y * b.y + c.y, a.z * b.z + c.z);
return r;
}
__device__ dim3 Wpmul(dim3 a, dim3 b)
{
dim3 r(a.x * b.x, a.y * b.y, a.z * b.z);
return r;
}
__host__ __device__ int bandsize(unsigned int imsize, unsigned int flen)
{
return (imsize + flen - 1) / 2;
}
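// Input index of the k-th filter tap for output position l, with symmetric
// (mirror) extension at both borders of a length-x signal.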
__host__ __device__ int coord(int l, int x, int flen, int k)
{
int n = 2 * l + 1 - (flen - 1) + k;
if (n < 0)
n = -n - 1;
if (n >= x)
n = x - 1 - (n - x);
return n;
}
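// Downsampling step along dimension y: each thread convolves the input with the
// reversed filter at mirrored positions and writes one coefficient of the half-size band.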
__global__ void kern_down3(dim3 dims, dim3 ostr, cuFloatComplex* out, dim3 istr, const cuFloatComplex* in, unsigned int flen, const float* filter)
{
dim3 ind = Wpmuladd(blockIdx, blockDim, threadIdx);
if ((ind.x >= dims.x) || (ind.y >= bandsize(dims.y, flen)) || (ind.z >= dims.z))
return;
cuFloatComplex y = make_cuFloatComplex(0., 0.);
for (unsigned int l = 0; l < flen; l++) {
int n = coord(ind.y, dims.y, flen, l);
dim3 ac = ind;
ac.y = n;
y.x += in[Wdot(ac, istr)].x * filter[flen - l - 1];
y.y += in[Wdot(ac, istr)].y * filter[flen - l - 1];
}
out[Wdot(ind, ostr)] = y;
}
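// Upsampling step along dimension y: each thread accumulates the band coefficients
// that contribute to its output sample (taps of matching parity) into the existing output.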
__global__ void kern_up3(dim3 dims, dim3 ostr, cuFloatComplex* out, dim3 istr, const cuFloatComplex* in, unsigned int flen, const float* filter)
{
dim3 ind = Wpmuladd(blockIdx, blockDim, threadIdx);
if ((ind.x >= dims.x) || (ind.y >= dims.y) || (ind.z >= dims.z))
return;
// cuFloatComplex y = make_cuFloatComplex(0., 0.);
cuFloatComplex y = out[Wdot(ind, ostr)];
int odd = (ind.y + 1) % 2;
for (unsigned int l = odd; l < flen; l += 2) {
int j = (ind.y + l - 1) / 2;
dim3 ac = ind;
ac.y = j;
if ((0 <= j) && ((unsigned int)j < bandsize(dims.y, flen))) {
y.x += in[Wdot(ac, istr)].x * filter[flen - l - 1];
y.y += in[Wdot(ac, istr)].y * filter[flen - l - 1];
}
}
out[Wdot(ind, ostr)] = y;
}
// extern "C" size_t cuda_shared_mem;
extern "C" void wl3_cuda_down3(const long dims[3], const long out_str[3], _Complex float* out, const long in_str[3], const _Complex float* in, unsigned int flen, const float filter[__VLA(flen)])
{
dim3 dims3(dims[0], dims[1], dims[2]);
dim3 ostrs(out_str[0] / CFL_SIZE, out_str[1] / CFL_SIZE, out_str[2] / CFL_SIZE);
dim3 istrs(in_str[0] / CFL_SIZE, in_str[1] / CFL_SIZE, in_str[2] / CFL_SIZE);
long d1 = bandsize(dims[1], flen);
int T = 8;
dim3 th(T, T, T);
dim3 bl((dims[0] + T - 1) / T, (d1 + T - 1) / T, (dims[2] + T - 1) / T);
hipLaunchKernelGGL(( kern_down3), dim3(bl), dim3(th) , 0, 0, dims3, ostrs, (cuFloatComplex*)out, istrs, (const cuFloatComplex*)in, flen, filter);
}
extern "C" void wl3_cuda_up3(const long dims[3], const long out_str[3], _Complex float* out, const long in_str[3], const _Complex float* in, unsigned int flen, const float filter[__VLA(flen)])
{
dim3 dims3(dims[0], dims[1], dims[2]);
dim3 ostrs(out_str[0] / CFL_SIZE, out_str[1] / CFL_SIZE, out_str[2] / CFL_SIZE);
dim3 istrs(in_str[0] / CFL_SIZE, in_str[1] / CFL_SIZE, in_str[2] / CFL_SIZE);
int T = 8;
dim3 th(T, T, T);
dim3 bl((dims[0] + T - 1) / T, (dims[1] + T - 1) / T, (dims[2] + T - 1) / T);
hipLaunchKernelGGL(( kern_up3), dim3(bl), dim3(th) , 0, 0, dims3, ostrs, (cuFloatComplex*)out, istrs, (const cuFloatComplex*)in, flen, filter);
}
| 5484e87362e08c27fe4f8e1d36fb4e8452689b10.cu | /* Copyright 2014. The Regents of the University of California.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2014 Martin Uecker <[email protected]>
*/
#include <complex.h>
#include <assert.h>
#include <stdbool.h>
#include <cuda.h>
#include <cuComplex.h>
#include "misc/misc.h"
#include "wl3-cuda.h"
#ifndef CFL_SIZE
#define CFL_SIZE sizeof(_Complex float)
#endif
__device__ long Wdot(dim3 a, dim3 b)
{
return a.x * b.x + a.y * b.y + a.z * b.z;
}
__device__ dim3 Wpmuladd(dim3 a, dim3 b, dim3 c)
{
dim3 r(a.x * b.x + c.x, a.y * b.y + c.y, a.z * b.z + c.z);
return r;
}
__device__ dim3 Wpmul(dim3 a, dim3 b)
{
dim3 r(a.x * b.x, a.y * b.y, a.z * b.z);
return r;
}
__host__ __device__ int bandsize(unsigned int imsize, unsigned int flen)
{
return (imsize + flen - 1) / 2;
}
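// Input index of the k-th filter tap for output position l, with symmetric
// (mirror) extension at both borders of a length-x signal.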
__host__ __device__ int coord(int l, int x, int flen, int k)
{
int n = 2 * l + 1 - (flen - 1) + k;
if (n < 0)
n = -n - 1;
if (n >= x)
n = x - 1 - (n - x);
return n;
}
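// Downsampling step along dimension y: each thread convolves the input with the
// reversed filter at mirrored positions and writes one coefficient of the half-size band.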
__global__ void kern_down3(dim3 dims, dim3 ostr, cuFloatComplex* out, dim3 istr, const cuFloatComplex* in, unsigned int flen, const float* filter)
{
dim3 ind = Wpmuladd(blockIdx, blockDim, threadIdx);
if ((ind.x >= dims.x) || (ind.y >= bandsize(dims.y, flen)) || (ind.z >= dims.z))
return;
cuFloatComplex y = make_cuFloatComplex(0., 0.);
for (unsigned int l = 0; l < flen; l++) {
int n = coord(ind.y, dims.y, flen, l);
dim3 ac = ind;
ac.y = n;
y.x += in[Wdot(ac, istr)].x * filter[flen - l - 1];
y.y += in[Wdot(ac, istr)].y * filter[flen - l - 1];
}
out[Wdot(ind, ostr)] = y;
}
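// Upsampling step along dimension y: each thread accumulates the band coefficients
// that contribute to its output sample (taps of matching parity) into the existing output.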
__global__ void kern_up3(dim3 dims, dim3 ostr, cuFloatComplex* out, dim3 istr, const cuFloatComplex* in, unsigned int flen, const float* filter)
{
dim3 ind = Wpmuladd(blockIdx, blockDim, threadIdx);
if ((ind.x >= dims.x) || (ind.y >= dims.y) || (ind.z >= dims.z))
return;
// cuFloatComplex y = make_cuFloatComplex(0., 0.);
cuFloatComplex y = out[Wdot(ind, ostr)];
int odd = (ind.y + 1) % 2;
for (unsigned int l = odd; l < flen; l += 2) {
int j = (ind.y + l - 1) / 2;
dim3 ac = ind;
ac.y = j;
if ((0 <= j) && ((unsigned int)j < bandsize(dims.y, flen))) {
y.x += in[Wdot(ac, istr)].x * filter[flen - l - 1];
y.y += in[Wdot(ac, istr)].y * filter[flen - l - 1];
}
}
out[Wdot(ind, ostr)] = y;
}
// extern "C" size_t cuda_shared_mem;
extern "C" void wl3_cuda_down3(const long dims[3], const long out_str[3], _Complex float* out, const long in_str[3], const _Complex float* in, unsigned int flen, const float filter[__VLA(flen)])
{
dim3 dims3(dims[0], dims[1], dims[2]);
dim3 ostrs(out_str[0] / CFL_SIZE, out_str[1] / CFL_SIZE, out_str[2] / CFL_SIZE);
dim3 istrs(in_str[0] / CFL_SIZE, in_str[1] / CFL_SIZE, in_str[2] / CFL_SIZE);
long d1 = bandsize(dims[1], flen);
int T = 8;
dim3 th(T, T, T);
dim3 bl((dims[0] + T - 1) / T, (d1 + T - 1) / T, (dims[2] + T - 1) / T);
kern_down3<<< bl, th >>>(dims3, ostrs, (cuFloatComplex*)out, istrs, (const cuFloatComplex*)in, flen, filter);
}
extern "C" void wl3_cuda_up3(const long dims[3], const long out_str[3], _Complex float* out, const long in_str[3], const _Complex float* in, unsigned int flen, const float filter[__VLA(flen)])
{
dim3 dims3(dims[0], dims[1], dims[2]);
dim3 ostrs(out_str[0] / CFL_SIZE, out_str[1] / CFL_SIZE, out_str[2] / CFL_SIZE);
dim3 istrs(in_str[0] / CFL_SIZE, in_str[1] / CFL_SIZE, in_str[2] / CFL_SIZE);
int T = 8;
dim3 th(T, T, T);
dim3 bl((dims[0] + T - 1) / T, (dims[1] + T - 1) / T, (dims[2] + T - 1) / T);
kern_up3<<< bl, th >>>(dims3, ostrs, (cuFloatComplex*)out, istrs, (const cuFloatComplex*)in, flen, filter);
}
|